1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/sched/core.c
4 *
5 * Core kernel scheduler code and related syscalls
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
8 */
9 #include <linux/highmem.h>
10 #include <linux/hrtimer_api.h>
11 #include <linux/ktime_api.h>
12 #include <linux/sched/signal.h>
13 #include <linux/syscalls_api.h>
14 #include <linux/debug_locks.h>
15 #include <linux/prefetch.h>
16 #include <linux/capability.h>
17 #include <linux/pgtable_api.h>
18 #include <linux/wait_bit.h>
19 #include <linux/jiffies.h>
20 #include <linux/spinlock_api.h>
21 #include <linux/cpumask_api.h>
22 #include <linux/lockdep_api.h>
23 #include <linux/hardirq.h>
24 #include <linux/softirq.h>
25 #include <linux/refcount_api.h>
26 #include <linux/topology.h>
27 #include <linux/sched/clock.h>
28 #include <linux/sched/cond_resched.h>
29 #include <linux/sched/cputime.h>
30 #include <linux/sched/debug.h>
31 #include <linux/sched/hotplug.h>
32 #include <linux/sched/init.h>
33 #include <linux/sched/isolation.h>
34 #include <linux/sched/loadavg.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sched/nohz.h>
37 #include <linux/sched/rseq_api.h>
38 #include <linux/sched/rt.h>
39
40 #include <linux/blkdev.h>
41 #include <linux/context_tracking.h>
42 #include <linux/cpuset.h>
43 #include <linux/delayacct.h>
44 #include <linux/init_task.h>
45 #include <linux/interrupt.h>
46 #include <linux/ioprio.h>
47 #include <linux/kallsyms.h>
48 #include <linux/kcov.h>
49 #include <linux/kprobes.h>
50 #include <linux/llist_api.h>
51 #include <linux/mmu_context.h>
52 #include <linux/mmzone.h>
53 #include <linux/mutex_api.h>
54 #include <linux/nmi.h>
55 #include <linux/nospec.h>
56 #include <linux/perf_event_api.h>
57 #include <linux/profile.h>
58 #include <linux/psi.h>
59 #include <linux/rcuwait_api.h>
60 #include <linux/sched/wake_q.h>
61 #include <linux/scs.h>
62 #include <linux/slab.h>
63 #include <linux/syscalls.h>
64 #include <linux/vtime.h>
65 #include <linux/wait_api.h>
66 #include <linux/workqueue_api.h>
67
68 #ifdef CONFIG_PREEMPT_DYNAMIC
69 # ifdef CONFIG_GENERIC_ENTRY
70 # include <linux/entry-common.h>
71 # endif
72 #endif
73
74 #include <uapi/linux/sched/types.h>
75
76 #include <asm/irq_regs.h>
77 #include <asm/switch_to.h>
78 #include <asm/tlb.h>
79
80 #define CREATE_TRACE_POINTS
81 #include <linux/sched/rseq_api.h>
82 #include <trace/events/sched.h>
83 #include <trace/events/ipi.h>
84 #undef CREATE_TRACE_POINTS
85
86 #include "sched.h"
87 #include "stats.h"
88 #include "autogroup.h"
89
90 #include "autogroup.h"
91 #include "pelt.h"
92 #include "smp.h"
93 #include "stats.h"
94
95 #include "../workqueue_internal.h"
96 #include "../../io_uring/io-wq.h"
97 #include "../smpboot.h"
98
99 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
101
102 /*
103 * Export tracepoints that act as a bare tracehook (ie: have no trace event
104 * associated with them) to allow external modules to probe them.
105 */
106 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
117
118 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
119
120 #ifdef CONFIG_SCHED_DEBUG
121 /*
122 * Debugging: various feature bits
123 *
124 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
125 * sysctl_sched_features, defined in sched.h, to allow constant propagation
126 * at compile time and compiler optimization based on the feature defaults.
127 */
128 #define SCHED_FEAT(name, enabled) \
129 (1UL << __SCHED_FEAT_##name) * enabled |
130 const_debug unsigned int sysctl_sched_features =
131 #include "features.h"
132 0;
133 #undef SCHED_FEAT
134
135 /*
136 * Print a warning if need_resched is set for the given duration (if
137 * LATENCY_WARN is enabled).
138 *
139 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
140 * per boot.
141 */
142 __read_mostly int sysctl_resched_latency_warn_ms = 100;
143 __read_mostly int sysctl_resched_latency_warn_once = 1;
144 #endif /* CONFIG_SCHED_DEBUG */
145
146 /*
147 * Number of tasks to iterate in a single balance run.
148 * Limited because this is done with IRQs disabled.
149 */
150 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
151
152 __read_mostly int scheduler_running;
153
154 #ifdef CONFIG_SCHED_CORE
155
156 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
157
158 /* kernel prio, less is more */
159 static inline int __task_prio(const struct task_struct *p)
160 {
161 if (p->sched_class == &stop_sched_class) /* trumps deadline */
162 return -2;
163
164 if (rt_prio(p->prio)) /* includes deadline */
165 return p->prio; /* [-1, 99] */
166
167 if (p->sched_class == &idle_sched_class)
168 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
169
170 return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
171 }
172
173 /*
174 * l(a,b)
175 * le(a,b) := !l(b,a)
176 * g(a,b) := l(b,a)
177 * ge(a,b) := !l(a,b)
178 */
179
180 /* real prio, less is less */
181 static inline bool prio_less(const struct task_struct *a,
182 const struct task_struct *b, bool in_fi)
183 {
184
185 int pa = __task_prio(a), pb = __task_prio(b);
186
187 if (-pa < -pb)
188 return true;
189
190 if (-pb < -pa)
191 return false;
192
193 if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
194 return !dl_time_before(a->dl.deadline, b->dl.deadline);
195
196 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
197 return cfs_prio_less(a, b, in_fi);
198
199 return false;
200 }
201
202 static inline bool __sched_core_less(const struct task_struct *a,
203 const struct task_struct *b)
204 {
205 if (a->core_cookie < b->core_cookie)
206 return true;
207
208 if (a->core_cookie > b->core_cookie)
209 return false;
210
211 /* flip prio, so high prio is leftmost */
212 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
213 return true;
214
215 return false;
216 }
217
218 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
219
220 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
221 {
222 return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
223 }
224
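/* rb_find()-style comparator: order core_tree nodes by their core_cookie. */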
225 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
226 {
227 const struct task_struct *p = __node_2_sc(node);
228 unsigned long cookie = (unsigned long)key;
229
230 if (cookie < p->core_cookie)
231 return -1;
232
233 if (cookie > p->core_cookie)
234 return 1;
235
236 return 0;
237 }
238
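/*
 * Add @p to this rq's core_tree (ordered by cookie, then priority) so that
 * sched_core_find() can locate cookie-matched tasks. Tasks without a cookie
 * are not tracked in the tree.
 */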
239 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
240 {
241 rq->core->core_task_seq++;
242
243 if (!p->core_cookie)
244 return;
245
246 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
247 }
248
249 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
250 {
251 rq->core->core_task_seq++;
252
253 if (sched_core_enqueued(p)) {
254 rb_erase(&p->core_node, &rq->core_tree);
255 RB_CLEAR_NODE(&p->core_node);
256 }
257
258 /*
259 * Migrating the last task off the cpu, with the cpu in forced idle
260 * state. Reschedule to create an accounting edge for forced idle,
261 * and re-examine whether the core is still in forced idle state.
262 */
263 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
264 rq->core->core_forceidle_count && rq->curr == rq->idle)
265 resched_curr(rq);
266 }
267
268 static int sched_task_is_throttled(struct task_struct *p, int cpu)
269 {
270 if (p->sched_class->task_is_throttled)
271 return p->sched_class->task_is_throttled(p, cpu);
272
273 return 0;
274 }
275
276 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
277 {
278 struct rb_node *node = &p->core_node;
279 int cpu = task_cpu(p);
280
281 do {
282 node = rb_next(node);
283 if (!node)
284 return NULL;
285
286 p = __node_2_sc(node);
287 if (p->core_cookie != cookie)
288 return NULL;
289
290 } while (sched_task_is_throttled(p, cpu));
291
292 return p;
293 }
294
295 /*
296 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
297 * If no suitable task is found, NULL will be returned.
298 */
299 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
300 {
301 struct task_struct *p;
302 struct rb_node *node;
303
304 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
305 if (!node)
306 return NULL;
307
308 p = __node_2_sc(node);
309 if (!sched_task_is_throttled(p, rq->cpu))
310 return p;
311
312 return sched_core_next(p, cookie);
313 }
314
315 /*
316 * Magic required such that:
317 *
318 * raw_spin_rq_lock(rq);
319 * ...
320 * raw_spin_rq_unlock(rq);
321 *
322 * ends up locking and unlocking the _same_ lock, and all CPUs
323 * always agree on what rq has what lock.
324 *
325 * XXX entirely possible to selectively enable cores, don't bother for now.
326 */
327
328 static DEFINE_MUTEX(sched_core_mutex);
329 static atomic_t sched_core_count;
330 static struct cpumask sched_core_mask;
331
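/*
 * Lock/unlock the rq->__lock of every SMT sibling of @cpu with IRQs disabled,
 * using increasing lockdep subclasses so a whole core can be held at once.
 */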
332 static void sched_core_lock(int cpu, unsigned long *flags)
333 {
334 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
335 int t, i = 0;
336
337 local_irq_save(*flags);
338 for_each_cpu(t, smt_mask)
339 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
340 }
341
342 static void sched_core_unlock(int cpu, unsigned long *flags)
343 {
344 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
345 int t;
346
347 for_each_cpu(t, smt_mask)
348 raw_spin_unlock(&cpu_rq(t)->__lock);
349 local_irq_restore(*flags);
350 }
351
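/*
 * Propagate the new core-scheduling state into every CPU's rq->core_enabled,
 * walking the online cores one at a time and then sweeping the offline CPUs.
 */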
352 static void __sched_core_flip(bool enabled)
353 {
354 unsigned long flags;
355 int cpu, t;
356
357 cpus_read_lock();
358
359 /*
360 * Toggle the online cores, one by one.
361 */
362 cpumask_copy(&sched_core_mask, cpu_online_mask);
363 for_each_cpu(cpu, &sched_core_mask) {
364 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
365
366 sched_core_lock(cpu, &flags);
367
368 for_each_cpu(t, smt_mask)
369 cpu_rq(t)->core_enabled = enabled;
370
371 cpu_rq(cpu)->core->core_forceidle_start = 0;
372
373 sched_core_unlock(cpu, &flags);
374
375 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
376 }
377
378 /*
379 * Toggle the offline CPUs.
380 */
381 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
382 cpu_rq(cpu)->core_enabled = enabled;
383
384 cpus_read_unlock();
385 }
386
387 static void sched_core_assert_empty(void)
388 {
389 int cpu;
390
391 for_each_possible_cpu(cpu)
392 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
393 }
394
395 static void __sched_core_enable(void)
396 {
397 static_branch_enable(&__sched_core_enabled);
398 /*
399 * Ensure all previous instances of raw_spin_rq_*lock() have finished
400 * and future ones will observe !sched_core_disabled().
401 */
402 synchronize_rcu();
403 __sched_core_flip(true);
404 sched_core_assert_empty();
405 }
406
407 static void __sched_core_disable(void)
408 {
409 sched_core_assert_empty();
410 __sched_core_flip(false);
411 static_branch_disable(&__sched_core_enabled);
412 }
413
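/*
 * Reference counting for core scheduling: the first sched_core_get() enables
 * the static key and flips all runqueues; the last sched_core_put() undoes it
 * from a workqueue, since disabling can block.
 */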
414 void sched_core_get(void)
415 {
416 if (atomic_inc_not_zero(&sched_core_count))
417 return;
418
419 mutex_lock(&sched_core_mutex);
420 if (!atomic_read(&sched_core_count))
421 __sched_core_enable();
422
423 smp_mb__before_atomic();
424 atomic_inc(&sched_core_count);
425 mutex_unlock(&sched_core_mutex);
426 }
427
428 static void __sched_core_put(struct work_struct *work)
429 {
430 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
431 __sched_core_disable();
432 mutex_unlock(&sched_core_mutex);
433 }
434 }
435
436 void sched_core_put(void)
437 {
438 static DECLARE_WORK(_work, __sched_core_put);
439
440 /*
441 * "There can be only one"
442 *
443 * Either this is the last one, or we don't actually need to do any
444 * 'work'. If it is the last *again*, we rely on
445 * WORK_STRUCT_PENDING_BIT.
446 */
447 if (!atomic_add_unless(&sched_core_count, -1, 1))
448 schedule_work(&_work);
449 }
450
451 #else /* !CONFIG_SCHED_CORE */
452
453 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
454 static inline void
455 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
456
457 #endif /* CONFIG_SCHED_CORE */
458
459 /*
460 * Serialization rules:
461 *
462 * Lock order:
463 *
464 * p->pi_lock
465 * rq->lock
466 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
467 *
468 * rq1->lock
469 * rq2->lock where: rq1 < rq2
470 *
471 * Regular state:
472 *
473 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
474 * local CPU's rq->lock, it optionally removes the task from the runqueue and
475 * always looks at the local rq data structures to find the most eligible task
476 * to run next.
477 *
478 * Task enqueue is also under rq->lock, possibly taken from another CPU.
479 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
480 * the local CPU to avoid bouncing the runqueue state around [ see
481 * ttwu_queue_wakelist() ]
482 *
483 * Task wakeup, specifically wakeups that involve migration, are horribly
484 * complicated to avoid having to take two rq->locks.
485 *
486 * Special state:
487 *
488 * System-calls and anything external will use task_rq_lock() which acquires
489 * both p->pi_lock and rq->lock. As a consequence the state they change is
490 * stable while holding either lock:
491 *
492 * - sched_setaffinity()/
493 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
494 * - set_user_nice(): p->se.load, p->*prio
495 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
496 * p->se.load, p->rt_priority,
497 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
498 * - sched_setnuma(): p->numa_preferred_nid
499 * - sched_move_task(): p->sched_task_group
500 * - uclamp_update_active() p->uclamp*
501 *
502 * p->state <- TASK_*:
503 *
504 * is changed locklessly using set_current_state(), __set_current_state() or
505 * set_special_state(), see their respective comments, or by
506 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
507 * concurrent self.
508 *
509 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
510 *
511 * is set by activate_task() and cleared by deactivate_task(), under
512 * rq->lock. Non-zero indicates the task is runnable, the special
513 * ON_RQ_MIGRATING state is used for migration without holding both
514 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
515 *
516 * p->on_cpu <- { 0, 1 }:
517 *
518 * is set by prepare_task() and cleared by finish_task() such that it will be
519 * set before p is scheduled-in and cleared after p is scheduled-out, both
520 * under rq->lock. Non-zero indicates the task is running on its CPU.
521 *
522 * [ The astute reader will observe that it is possible for two tasks on one
523 * CPU to have ->on_cpu = 1 at the same time. ]
524 *
525 * task_cpu(p): is changed by set_task_cpu(), the rules are:
526 *
527 * - Don't call set_task_cpu() on a blocked task:
528 *
529 * We don't care what CPU we're not running on, this simplifies hotplug,
530 * the CPU assignment of blocked tasks isn't required to be valid.
531 *
532 * - for try_to_wake_up(), called under p->pi_lock:
533 *
534 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
535 *
536 * - for migration called under rq->lock:
537 * [ see task_on_rq_migrating() in task_rq_lock() ]
538 *
539 * o move_queued_task()
540 * o detach_task()
541 *
542 * - for migration called under double_rq_lock():
543 *
544 * o __migrate_swap_task()
545 * o push_rt_task() / pull_rt_task()
546 * o push_dl_task() / pull_dl_task()
547 * o dl_task_offline_migration()
548 *
549 */
550
551 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
552 {
553 raw_spinlock_t *lock;
554
555 /* Matches synchronize_rcu() in __sched_core_enable() */
556 preempt_disable();
557 if (sched_core_disabled()) {
558 raw_spin_lock_nested(&rq->__lock, subclass);
559 /* preempt_count *MUST* be > 1 */
560 preempt_enable_no_resched();
561 return;
562 }
563
564 for (;;) {
565 lock = __rq_lockp(rq);
566 raw_spin_lock_nested(lock, subclass);
567 if (likely(lock == __rq_lockp(rq))) {
568 /* preempt_count *MUST* be > 1 */
569 preempt_enable_no_resched();
570 return;
571 }
572 raw_spin_unlock(lock);
573 }
574 }
575
576 bool raw_spin_rq_trylock(struct rq *rq)
577 {
578 raw_spinlock_t *lock;
579 bool ret;
580
581 /* Matches synchronize_rcu() in __sched_core_enable() */
582 preempt_disable();
583 if (sched_core_disabled()) {
584 ret = raw_spin_trylock(&rq->__lock);
585 preempt_enable();
586 return ret;
587 }
588
589 for (;;) {
590 lock = __rq_lockp(rq);
591 ret = raw_spin_trylock(lock);
592 if (!ret || (likely(lock == __rq_lockp(rq)))) {
593 preempt_enable();
594 return ret;
595 }
596 raw_spin_unlock(lock);
597 }
598 }
599
600 void raw_spin_rq_unlock(struct rq *rq)
601 {
602 raw_spin_unlock(rq_lockp(rq));
603 }
604
605 #ifdef CONFIG_SMP
606 /*
607 * double_rq_lock - safely lock two runqueues
608 */
609 void double_rq_lock(struct rq *rq1, struct rq *rq2)
610 {
611 lockdep_assert_irqs_disabled();
612
613 if (rq_order_less(rq2, rq1))
614 swap(rq1, rq2);
615
616 raw_spin_rq_lock(rq1);
617 if (__rq_lockp(rq1) != __rq_lockp(rq2))
618 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
619
620 double_rq_clock_clear_update(rq1, rq2);
621 }
622 #endif
623
624 /*
625 * __task_rq_lock - lock the rq @p resides on.
626 */
627 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
628 __acquires(rq->lock)
629 {
630 struct rq *rq;
631
632 lockdep_assert_held(&p->pi_lock);
633
634 for (;;) {
635 rq = task_rq(p);
636 raw_spin_rq_lock(rq);
637 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
638 rq_pin_lock(rq, rf);
639 return rq;
640 }
641 raw_spin_rq_unlock(rq);
642
643 while (unlikely(task_on_rq_migrating(p)))
644 cpu_relax();
645 }
646 }
647
648 /*
649 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
650 */
651 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
652 __acquires(p->pi_lock)
653 __acquires(rq->lock)
654 {
655 struct rq *rq;
656
657 for (;;) {
658 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
659 rq = task_rq(p);
660 raw_spin_rq_lock(rq);
661 /*
662 * move_queued_task() task_rq_lock()
663 *
664 * ACQUIRE (rq->lock)
665 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
666 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
667 * [S] ->cpu = new_cpu [L] task_rq()
668 * [L] ->on_rq
669 * RELEASE (rq->lock)
670 *
671 * If we observe the old CPU in task_rq_lock(), the acquire of
672 * the old rq->lock will fully serialize against the stores.
673 *
674 * If we observe the new CPU in task_rq_lock(), the address
675 * dependency headed by '[L] rq = task_rq()' and the acquire
676 * will pair with the WMB to ensure we then also see migrating.
677 */
678 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
679 rq_pin_lock(rq, rf);
680 return rq;
681 }
682 raw_spin_rq_unlock(rq);
683 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
684
685 while (unlikely(task_on_rq_migrating(p)))
686 cpu_relax();
687 }
688 }
689
690 /*
691 * RQ-clock updating methods:
692 */
693
694 static void update_rq_clock_task(struct rq *rq, s64 delta)
695 {
696 /*
697 * In theory, the compiler should just see 0 here, and optimize out the call
698 * to sched_rt_avg_update. But I don't trust it...
699 */
700 s64 __maybe_unused steal = 0, irq_delta = 0;
701
702 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
703 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
704
705 /*
706 * Since irq_time is only updated on {soft,}irq_exit, we might run into
707 * this case when a previous update_rq_clock() happened inside a
708 * {soft,}irq region.
709 *
710 * When this happens, we stop ->clock_task and only update the
711 * prev_irq_time stamp to account for the part that fit, so that a next
712 * update will consume the rest. This ensures ->clock_task is
713 * monotonic.
714 *
715 * It does, however, cause some slight misattribution of {soft,}irq
716 * time; a more accurate solution would be to update the irq_time using
717 * the current rq->clock timestamp, except that would require using
718 * atomic ops.
719 */
720 if (irq_delta > delta)
721 irq_delta = delta;
722
723 rq->prev_irq_time += irq_delta;
724 delta -= irq_delta;
725 delayacct_irq(rq->curr, irq_delta);
726 #endif
727 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
728 if (static_key_false((&paravirt_steal_rq_enabled))) {
729 steal = paravirt_steal_clock(cpu_of(rq));
730 steal -= rq->prev_steal_time_rq;
731
732 if (unlikely(steal > delta))
733 steal = delta;
734
735 rq->prev_steal_time_rq += steal;
736 delta -= steal;
737 }
738 #endif
739
740 rq->clock_task += delta;
741
742 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
743 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
744 update_irq_load_avg(rq, irq_delta + steal);
745 #endif
746 update_rq_clock_pelt(rq, delta);
747 }
748
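/*
 * Advance rq->clock from sched_clock_cpu() and feed the delta into
 * update_rq_clock_task(), which discounts IRQ and paravirt steal time
 * before advancing rq->clock_task.
 */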
749 void update_rq_clock(struct rq *rq)
750 {
751 s64 delta;
752
753 lockdep_assert_rq_held(rq);
754
755 if (rq->clock_update_flags & RQCF_ACT_SKIP)
756 return;
757
758 #ifdef CONFIG_SCHED_DEBUG
759 if (sched_feat(WARN_DOUBLE_CLOCK))
760 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
761 rq->clock_update_flags |= RQCF_UPDATED;
762 #endif
763
764 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
765 if (delta < 0)
766 return;
767 rq->clock += delta;
768 update_rq_clock_task(rq, delta);
769 }
770
771 #ifdef CONFIG_SCHED_HRTICK
772 /*
773 * Use HR-timers to deliver accurate preemption points.
774 */
775
776 static void hrtick_clear(struct rq *rq)
777 {
778 if (hrtimer_active(&rq->hrtick_timer))
779 hrtimer_cancel(&rq->hrtick_timer);
780 }
781
782 /*
783 * High-resolution timer tick.
784 * Runs from hardirq context with interrupts disabled.
785 */
786 static enum hrtimer_restart hrtick(struct hrtimer *timer)
787 {
788 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
789 struct rq_flags rf;
790
791 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
792
793 rq_lock(rq, &rf);
794 update_rq_clock(rq);
795 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
796 rq_unlock(rq, &rf);
797
798 return HRTIMER_NORESTART;
799 }
800
801 #ifdef CONFIG_SMP
802
803 static void __hrtick_restart(struct rq *rq)
804 {
805 struct hrtimer *timer = &rq->hrtick_timer;
806 ktime_t time = rq->hrtick_time;
807
808 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
809 }
810
811 /*
812 * called from hardirq (IPI) context
813 */
814 static void __hrtick_start(void *arg)
815 {
816 struct rq *rq = arg;
817 struct rq_flags rf;
818
819 rq_lock(rq, &rf);
820 __hrtick_restart(rq);
821 rq_unlock(rq, &rf);
822 }
823
824 /*
825 * Called to set the hrtick timer state.
826 *
827 * called with rq->lock held and irqs disabled
828 */
829 void hrtick_start(struct rq *rq, u64 delay)
830 {
831 struct hrtimer *timer = &rq->hrtick_timer;
832 s64 delta;
833
834 /*
835 * Don't schedule slices shorter than 10000ns, that just
836 * doesn't make sense and can cause timer DoS.
837 */
838 delta = max_t(s64, delay, 10000LL);
839 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
840
841 if (rq == this_rq())
842 __hrtick_restart(rq);
843 else
844 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
845 }
846
847 #else
848 /*
849 * Called to set the hrtick timer state.
850 *
851 * called with rq->lock held and irqs disabled
852 */
853 void hrtick_start(struct rq *rq, u64 delay)
854 {
855 /*
856 * Don't schedule slices shorter than 10000ns, that just
857 * doesn't make sense. Rely on vruntime for fairness.
858 */
859 delay = max_t(u64, delay, 10000LL);
860 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
861 HRTIMER_MODE_REL_PINNED_HARD);
862 }
863
864 #endif /* CONFIG_SMP */
865
866 static void hrtick_rq_init(struct rq *rq)
867 {
868 #ifdef CONFIG_SMP
869 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
870 #endif
871 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
872 rq->hrtick_timer.function = hrtick;
873 }
874 #else /* CONFIG_SCHED_HRTICK */
875 static inline void hrtick_clear(struct rq *rq)
876 {
877 }
878
879 static inline void hrtick_rq_init(struct rq *rq)
880 {
881 }
882 #endif /* CONFIG_SCHED_HRTICK */
883
884 /*
885 * cmpxchg based fetch_or, macro so it works for different integer types
886 */
887 #define fetch_or(ptr, mask) \
888 ({ \
889 typeof(ptr) _ptr = (ptr); \
890 typeof(mask) _mask = (mask); \
891 typeof(*_ptr) _val = *_ptr; \
892 \
893 do { \
894 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \
895 _val; \
896 })
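/*
 * Illustrative use (mirrors set_nr_and_not_polling() below):
 *
 *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 */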
897
898 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
899 /*
900 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
901 * this avoids any races wrt polling state changes and thereby avoids
902 * spurious IPIs.
903 */
904 static inline bool set_nr_and_not_polling(struct task_struct *p)
905 {
906 struct thread_info *ti = task_thread_info(p);
907 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
908 }
909
910 /*
911 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
912 *
913 * If this returns true, then the idle task promises to call
914 * sched_ttwu_pending() and reschedule soon.
915 */
916 static bool set_nr_if_polling(struct task_struct *p)
917 {
918 struct thread_info *ti = task_thread_info(p);
919 typeof(ti->flags) val = READ_ONCE(ti->flags);
920
921 for (;;) {
922 if (!(val & _TIF_POLLING_NRFLAG))
923 return false;
924 if (val & _TIF_NEED_RESCHED)
925 return true;
926 if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
927 break;
928 }
929 return true;
930 }
931
932 #else
933 static inline bool set_nr_and_not_polling(struct task_struct *p)
934 {
935 set_tsk_need_resched(p);
936 return true;
937 }
938
939 #ifdef CONFIG_SMP
940 static inline bool set_nr_if_polling(struct task_struct *p)
941 {
942 return false;
943 }
944 #endif
945 #endif
946
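/*
 * Typical wake_q usage pattern (an illustrative sketch; see callers such as
 * futex and rtmutex for the real thing):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 *
 * where 'some_lock' stands for whatever lock serializes waker and waiter.
 */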
947 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
948 {
949 struct wake_q_node *node = &task->wake_q;
950
951 /*
952 * Atomically grab the task, if ->wake_q is !nil already it means
953 * it's already queued (either by us or someone else) and will get the
954 * wakeup due to that.
955 *
956 * In order to ensure that a pending wakeup will observe our pending
957 * state, even in the failed case, an explicit smp_mb() must be used.
958 */
959 smp_mb__before_atomic();
960 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
961 return false;
962
963 /*
964 * The head is context local, there can be no concurrency.
965 */
966 *head->lastp = node;
967 head->lastp = &node->next;
968 return true;
969 }
970
971 /**
972 * wake_q_add() - queue a wakeup for 'later' waking.
973 * @head: the wake_q_head to add @task to
974 * @task: the task to queue for 'later' wakeup
975 *
976 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
977 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
978 * instantly.
979 *
980 * This function must be used as-if it were wake_up_process(); IOW the task
981 * must be ready to be woken at this location.
982 */
983 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
984 {
985 if (__wake_q_add(head, task))
986 get_task_struct(task);
987 }
988
989 /**
990 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
991 * @head: the wake_q_head to add @task to
992 * @task: the task to queue for 'later' wakeup
993 *
994 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
995 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
996 * instantly.
997 *
998 * This function must be used as-if it were wake_up_process(); IOW the task
999 * must be ready to be woken at this location.
1000 *
1001 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1002 * that already hold a reference to @task can call the 'safe' version and trust
1003 * wake_q to do the right thing depending on whether or not the @task is already
1004 * queued for wakeup.
1005 */
1006 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1007 {
1008 if (!__wake_q_add(head, task))
1009 put_task_struct(task);
1010 }
1011
1012 void wake_up_q(struct wake_q_head *head)
1013 {
1014 struct wake_q_node *node = head->first;
1015
1016 while (node != WAKE_Q_TAIL) {
1017 struct task_struct *task;
1018
1019 task = container_of(node, struct task_struct, wake_q);
1020 /* Task can safely be re-inserted now: */
1021 node = node->next;
1022 task->wake_q.next = NULL;
1023
1024 /*
1025 * wake_up_process() executes a full barrier, which pairs with
1026 * the queueing in wake_q_add() so as not to miss wakeups.
1027 */
1028 wake_up_process(task);
1029 put_task_struct(task);
1030 }
1031 }
1032
1033 /*
1034 * resched_curr - mark rq's current task 'to be rescheduled now'.
1035 *
1036 * On UP this means the setting of the need_resched flag, on SMP it
1037 * might also involve a cross-CPU call to trigger the scheduler on
1038 * the target CPU.
1039 */
1040 void resched_curr(struct rq *rq)
1041 {
1042 struct task_struct *curr = rq->curr;
1043 int cpu;
1044
1045 lockdep_assert_rq_held(rq);
1046
1047 if (test_tsk_need_resched(curr))
1048 return;
1049
1050 cpu = cpu_of(rq);
1051
1052 if (cpu == smp_processor_id()) {
1053 set_tsk_need_resched(curr);
1054 set_preempt_need_resched();
1055 return;
1056 }
1057
1058 if (set_nr_and_not_polling(curr))
1059 smp_send_reschedule(cpu);
1060 else
1061 trace_sched_wake_idle_without_ipi(cpu);
1062 }
1063
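/* Like resched_curr(), but for a remote @cpu: takes the rq lock itself and
 * skips CPUs that are offline (unless it is the local CPU). */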
1064 void resched_cpu(int cpu)
1065 {
1066 struct rq *rq = cpu_rq(cpu);
1067 unsigned long flags;
1068
1069 raw_spin_rq_lock_irqsave(rq, flags);
1070 if (cpu_online(cpu) || cpu == smp_processor_id())
1071 resched_curr(rq);
1072 raw_spin_rq_unlock_irqrestore(rq, flags);
1073 }
1074
1075 #ifdef CONFIG_SMP
1076 #ifdef CONFIG_NO_HZ_COMMON
1077 /*
1078 * In the semi idle case, use the nearest busy CPU for migrating timers
1079 * from an idle CPU. This is good for power-savings.
1080 *
1081 * We don't do a similar optimization for a completely idle system, as
1082 * selecting an idle CPU would add more delay to the timers than intended
1083 * (as that CPU's timer base may not be up to date wrt jiffies etc).
1084 */
1085 int get_nohz_timer_target(void)
1086 {
1087 int i, cpu = smp_processor_id(), default_cpu = -1;
1088 struct sched_domain *sd;
1089 const struct cpumask *hk_mask;
1090
1091 if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1092 if (!idle_cpu(cpu))
1093 return cpu;
1094 default_cpu = cpu;
1095 }
1096
1097 hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1098
1099 guard(rcu)();
1100
1101 for_each_domain(cpu, sd) {
1102 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1103 if (cpu == i)
1104 continue;
1105
1106 if (!idle_cpu(i))
1107 return i;
1108 }
1109 }
1110
1111 if (default_cpu == -1)
1112 default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1113
1114 return default_cpu;
1115 }
1116
1117 /*
1118 * When add_timer_on() enqueues a timer into the timer wheel of an
1119 * idle CPU then this timer might expire before the next timer event
1120 * which is scheduled to wake up that CPU. In case of a completely
1121 * idle system the next event might even be infinite time into the
1122 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1123 * leaves the inner idle loop so the newly added timer is taken into
1124 * account when the CPU goes back to idle and evaluates the timer
1125 * wheel for the next timer event.
1126 */
1127 static void wake_up_idle_cpu(int cpu)
1128 {
1129 struct rq *rq = cpu_rq(cpu);
1130
1131 if (cpu == smp_processor_id())
1132 return;
1133
1134 if (set_nr_and_not_polling(rq->idle))
1135 smp_send_reschedule(cpu);
1136 else
1137 trace_sched_wake_idle_without_ipi(cpu);
1138 }
1139
1140 static bool wake_up_full_nohz_cpu(int cpu)
1141 {
1142 /*
1143 * We just need the target to call irq_exit() and re-evaluate
1144 * the next tick. The nohz full kick at least implies that.
1145 * If needed we can still optimize that later with an
1146 * empty IRQ.
1147 */
1148 if (cpu_is_offline(cpu))
1149 return true; /* Don't try to wake offline CPUs. */
1150 if (tick_nohz_full_cpu(cpu)) {
1151 if (cpu != smp_processor_id() ||
1152 tick_nohz_tick_stopped())
1153 tick_nohz_full_kick_cpu(cpu);
1154 return true;
1155 }
1156
1157 return false;
1158 }
1159
1160 /*
1161 * Wake up the specified CPU. If the CPU is going offline, it is the
1162 * caller's responsibility to deal with the lost wakeup, for example,
1163 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1164 */
1165 void wake_up_nohz_cpu(int cpu)
1166 {
1167 if (!wake_up_full_nohz_cpu(cpu))
1168 wake_up_idle_cpu(cpu);
1169 }
1170
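/*
 * IPI callback for rq->nohz_csd: acknowledge the NOHZ kick flags and, if this
 * CPU is idle, raise SCHED_SOFTIRQ to run the nohz idle balance.
 */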
1171 static void nohz_csd_func(void *info)
1172 {
1173 struct rq *rq = info;
1174 int cpu = cpu_of(rq);
1175 unsigned int flags;
1176
1177 /*
1178 * Release the rq::nohz_csd.
1179 */
1180 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1181 WARN_ON(!(flags & NOHZ_KICK_MASK));
1182
1183 rq->idle_balance = idle_cpu(cpu);
1184 if (rq->idle_balance) {
1185 rq->nohz_idle_balance = flags;
1186 __raise_softirq_irqoff(SCHED_SOFTIRQ);
1187 }
1188 }
1189
1190 #endif /* CONFIG_NO_HZ_COMMON */
1191
1192 #ifdef CONFIG_NO_HZ_FULL
1193 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1194 {
1195 if (rq->nr_running != 1)
1196 return false;
1197
1198 if (p->sched_class != &fair_sched_class)
1199 return false;
1200
1201 if (!task_on_rq_queued(p))
1202 return false;
1203
1204 return true;
1205 }
1206
1207 bool sched_can_stop_tick(struct rq *rq)
1208 {
1209 int fifo_nr_running;
1210
1211 /* Deadline tasks, even if single, need the tick */
1212 if (rq->dl.dl_nr_running)
1213 return false;
1214
1215 /*
1216 * If there is more than one RR task, we need the tick to affect the
1217 * actual RR behaviour.
1218 */
1219 if (rq->rt.rr_nr_running) {
1220 if (rq->rt.rr_nr_running == 1)
1221 return true;
1222 else
1223 return false;
1224 }
1225
1226 /*
1227 * If there are no RR tasks, but there are FIFO tasks, we can skip the
1228 * tick: there is no forced preemption between FIFO tasks.
1229 */
1230 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1231 if (fifo_nr_running)
1232 return true;
1233
1234 /*
1235 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
1236 * if there's more than one we need the tick for involuntary
1237 * preemption.
1238 */
1239 if (rq->nr_running > 1)
1240 return false;
1241
1242 /*
1243 * If there is one task and it has CFS runtime bandwidth constraints
1244 * and it's on the cpu now we don't want to stop the tick.
1245 * This check prevents clearing the bit if a newly enqueued task here is
1246 * dequeued by migrating while the constrained task continues to run.
1247 * E.g. going from 2->1 without going through pick_next_task().
1248 */
1249 if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
1250 if (cfs_task_bw_constrained(rq->curr))
1251 return false;
1252 }
1253
1254 return true;
1255 }
1256 #endif /* CONFIG_NO_HZ_FULL */
1257 #endif /* CONFIG_SMP */
1258
1259 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1260 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1261 /*
1262 * Iterate task_group tree rooted at *from, calling @down when first entering a
1263 * node and @up when leaving it for the final time.
1264 *
1265 * Caller must hold rcu_lock or sufficient equivalent.
1266 */
1267 int walk_tg_tree_from(struct task_group *from,
1268 tg_visitor down, tg_visitor up, void *data)
1269 {
1270 struct task_group *parent, *child;
1271 int ret;
1272
1273 parent = from;
1274
1275 down:
1276 ret = (*down)(parent, data);
1277 if (ret)
1278 goto out;
1279 list_for_each_entry_rcu(child, &parent->children, siblings) {
1280 parent = child;
1281 goto down;
1282
1283 up:
1284 continue;
1285 }
1286 ret = (*up)(parent, data);
1287 if (ret || parent == from)
1288 goto out;
1289
1290 child = parent;
1291 parent = parent->parent;
1292 if (parent)
1293 goto up;
1294 out:
1295 return ret;
1296 }
1297
1298 int tg_nop(struct task_group *tg, void *data)
1299 {
1300 return 0;
1301 }
1302 #endif
1303
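/*
 * Translate @p->static_prio into its load weight via sched_prio_to_weight[],
 * using the special idle weight for SCHED_IDLE tasks. Fair tasks are
 * reweighted through reweight_task() when @update_load is set.
 */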
1304 static void set_load_weight(struct task_struct *p, bool update_load)
1305 {
1306 int prio = p->static_prio - MAX_RT_PRIO;
1307 struct load_weight lw;
1308
1309 if (task_has_idle_policy(p)) {
1310 lw.weight = scale_load(WEIGHT_IDLEPRIO);
1311 lw.inv_weight = WMULT_IDLEPRIO;
1312 } else {
1313 lw.weight = scale_load(sched_prio_to_weight[prio]);
1314 lw.inv_weight = sched_prio_to_wmult[prio];
1315 }
1316
1317 /*
1318 * SCHED_OTHER tasks have to update their load when changing their
1319 * weight
1320 */
1321 if (update_load && p->sched_class == &fair_sched_class)
1322 reweight_task(p, &lw);
1323 else
1324 p->se.load = lw;
1325 }
1326
1327 #ifdef CONFIG_UCLAMP_TASK
1328 /*
1329 * Serializes updates of utilization clamp values
1330 *
1331 * The (slow-path) user-space triggers utilization clamp value updates which
1332 * can require updates on (fast-path) scheduler's data structures used to
1333 * support enqueue/dequeue operations.
1334 * While the per-CPU rq lock protects fast-path update operations, user-space
1335 * requests are serialized using a mutex to reduce the risk of conflicting
1336 * updates or API abuses.
1337 */
1338 static DEFINE_MUTEX(uclamp_mutex);
1339
1340 /* Max allowed minimum utilization */
1341 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1342
1343 /* Max allowed maximum utilization */
1344 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1345
1346 /*
1347 * By default RT tasks run at the maximum performance point/capacity of the
1348 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1349 * SCHED_CAPACITY_SCALE.
1350 *
1351 * This knob allows admins to change the default behavior when uclamp is being
1352 * used. In battery powered devices, particularly, running at the maximum
1353 * capacity and frequency will increase energy consumption and shorten the
1354 * battery life.
1355 *
1356 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1357 *
1358 * This knob will not override the system default sched_util_clamp_min defined
1359 * above.
1360 */
1361 static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1362
1363 /* All clamps are required to be less or equal than these values */
1364 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1365
1366 /*
1367 * This static key is used to reduce the uclamp overhead in the fast path. It
1368 * primarily disables the call to uclamp_rq_{inc, dec}() in
1369 * enqueue/dequeue_task().
1370 *
1371 * This allows users to continue to enable uclamp in their kernel config with
1372 * minimum uclamp overhead in the fast path.
1373 *
1374 * As soon as userspace modifies any of the uclamp knobs, the static key is
1375 * enabled, since we then have actual users that make use of uclamp
1376 * functionality.
1377 *
1378 * The knobs that would enable this static key are:
1379 *
1380 * * A task modifying its uclamp value with sched_setattr().
1381 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1382 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1383 */
1384 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1385
1386 /* Integer rounded range for each bucket */
1387 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
1388
1389 #define for_each_clamp_id(clamp_id) \
1390 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
1391
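/* Map a clamp value in [0..SCHED_CAPACITY_SCALE] onto its bucket index. */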
1392 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
1393 {
1394 return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
1395 }
1396
1397 static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
1398 {
1399 if (clamp_id == UCLAMP_MIN)
1400 return 0;
1401 return SCHED_CAPACITY_SCALE;
1402 }
1403
1404 static inline void uclamp_se_set(struct uclamp_se *uc_se,
1405 unsigned int value, bool user_defined)
1406 {
1407 uc_se->value = value;
1408 uc_se->bucket_id = uclamp_bucket_id(value);
1409 uc_se->user_defined = user_defined;
1410 }
1411
1412 static inline unsigned int
1413 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1414 unsigned int clamp_value)
1415 {
1416 /*
1417 * Avoid blocked utilization pushing up the frequency when we go
1418 * idle (which drops the max-clamp) by retaining the last known
1419 * max-clamp.
1420 */
1421 if (clamp_id == UCLAMP_MAX) {
1422 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1423 return clamp_value;
1424 }
1425
1426 return uclamp_none(UCLAMP_MIN);
1427 }
1428
1429 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1430 unsigned int clamp_value)
1431 {
1432 /* Reset max-clamp retention only on idle exit */
1433 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1434 return;
1435
1436 uclamp_rq_set(rq, clamp_id, clamp_value);
1437 }
1438
1439 static inline
1440 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1441 unsigned int clamp_value)
1442 {
1443 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1444 int bucket_id = UCLAMP_BUCKETS - 1;
1445
1446 /*
1447 * Since both min and max clamps are max aggregated, find the
1448 * top most bucket with tasks in.
1449 */
1450 for ( ; bucket_id >= 0; bucket_id--) {
1451 if (!bucket[bucket_id].tasks)
1452 continue;
1453 return bucket[bucket_id].value;
1454 }
1455
1456 /* No tasks -- default clamp values */
1457 return uclamp_idle_value(rq, clamp_id, clamp_value);
1458 }
1459
1460 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1461 {
1462 unsigned int default_util_min;
1463 struct uclamp_se *uc_se;
1464
1465 lockdep_assert_held(&p->pi_lock);
1466
1467 uc_se = &p->uclamp_req[UCLAMP_MIN];
1468
1469 /* Only sync if user didn't override the default */
1470 if (uc_se->user_defined)
1471 return;
1472
1473 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1474 uclamp_se_set(uc_se, default_util_min, false);
1475 }
1476
1477 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1478 {
1479 struct rq_flags rf;
1480 struct rq *rq;
1481
1482 if (!rt_task(p))
1483 return;
1484
1485 /* Protect updates to p->uclamp_* */
1486 rq = task_rq_lock(p, &rf);
1487 __uclamp_update_util_min_rt_default(p);
1488 task_rq_unlock(rq, p, &rf);
1489 }
1490
1491 static inline struct uclamp_se
1492 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1493 {
1494 /* Copy by value as we could modify it */
1495 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1496 #ifdef CONFIG_UCLAMP_TASK_GROUP
1497 unsigned int tg_min, tg_max, value;
1498
1499 /*
1500 * Tasks in autogroups or root task group will be
1501 * restricted by system defaults.
1502 */
1503 if (task_group_is_autogroup(task_group(p)))
1504 return uc_req;
1505 if (task_group(p) == &root_task_group)
1506 return uc_req;
1507
1508 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1509 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1510 value = uc_req.value;
1511 value = clamp(value, tg_min, tg_max);
1512 uclamp_se_set(&uc_req, value, false);
1513 #endif
1514
1515 return uc_req;
1516 }
1517
1518 /*
1519 * The effective clamp bucket index of a task depends on, by increasing
1520 * priority:
1521 * - the task specific clamp value, when explicitly requested from userspace
1522 * - the task group effective clamp value, for tasks not either in the root
1523 * group or in an autogroup
1524 * - the system default clamp value, defined by the sysadmin
1525 */
1526 static inline struct uclamp_se
1527 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1528 {
1529 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1530 struct uclamp_se uc_max = uclamp_default[clamp_id];
1531
1532 /* System default restrictions always apply */
1533 if (unlikely(uc_req.value > uc_max.value))
1534 return uc_max;
1535
1536 return uc_req;
1537 }
1538
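/*
 * Effective clamp value of @p: the rq-refcounted (active) value while the
 * task is RUNNABLE, otherwise the value recomputed from task, cgroup and
 * system restrictions.
 */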
1539 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1540 {
1541 struct uclamp_se uc_eff;
1542
1543 /* Task currently refcounted: use back-annotated (effective) value */
1544 if (p->uclamp[clamp_id].active)
1545 return (unsigned long)p->uclamp[clamp_id].value;
1546
1547 uc_eff = uclamp_eff_get(p, clamp_id);
1548
1549 return (unsigned long)uc_eff.value;
1550 }
1551
1552 /*
1553 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1554 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1555 * updates the rq's clamp value if required.
1556 *
1557 * Tasks can have a task-specific value requested from user-space; each
1558 * bucket tracks the maximum value of the tasks refcounted in it.
1559 * This "local max aggregation" allows tracking the exact "requested" value
1560 * for each bucket when all of its RUNNABLE tasks require the same clamp.
1561 */
1562 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1563 enum uclamp_id clamp_id)
1564 {
1565 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1566 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1567 struct uclamp_bucket *bucket;
1568
1569 lockdep_assert_rq_held(rq);
1570
1571 /* Update task effective clamp */
1572 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1573
1574 bucket = &uc_rq->bucket[uc_se->bucket_id];
1575 bucket->tasks++;
1576 uc_se->active = true;
1577
1578 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1579
1580 /*
1581 * Local max aggregation: rq buckets always track the max
1582 * "requested" clamp value of its RUNNABLE tasks.
1583 */
1584 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1585 bucket->value = uc_se->value;
1586
1587 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1588 uclamp_rq_set(rq, clamp_id, uc_se->value);
1589 }
1590
1591 /*
1592 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1593 * is released. If this is the last task reference counting the rq's max
1594 * active clamp value, then the rq's clamp value is updated.
1595 *
1596 * Both refcounted tasks and rq's cached clamp values are expected to be
1597 * always valid. If it's detected they are not, as defensive programming,
1598 * enforce the expected state and warn.
1599 */
1600 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1601 enum uclamp_id clamp_id)
1602 {
1603 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1604 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1605 struct uclamp_bucket *bucket;
1606 unsigned int bkt_clamp;
1607 unsigned int rq_clamp;
1608
1609 lockdep_assert_rq_held(rq);
1610
1611 /*
1612 * If sched_uclamp_used was enabled after task @p was enqueued,
1613 * we could end up with an unbalanced call to uclamp_rq_dec_id().
1614 *
1615 * In this case the uc_se->active flag should be false since no uclamp
1616 * accounting was performed at enqueue time and we can just return
1617 * here.
1618 *
1619 * Need to be careful of the following enqueue/dequeue ordering
1620 * problem too
1621 *
1622 * enqueue(taskA)
1623 * // sched_uclamp_used gets enabled
1624 * enqueue(taskB)
1625 * dequeue(taskA)
1626 * // Must not decrement bucket->tasks here
1627 * dequeue(taskB)
1628 *
1629 * where we could end up with stale data in uc_se and
1630 * bucket[uc_se->bucket_id].
1631 *
1632 * The following check here eliminates the possibility of such race.
1633 */
1634 if (unlikely(!uc_se->active))
1635 return;
1636
1637 bucket = &uc_rq->bucket[uc_se->bucket_id];
1638
1639 SCHED_WARN_ON(!bucket->tasks);
1640 if (likely(bucket->tasks))
1641 bucket->tasks--;
1642
1643 uc_se->active = false;
1644
1645 /*
1646 * Keep "local max aggregation" simple and accept to (possibly)
1647 * overboost some RUNNABLE tasks in the same bucket.
1648 * The rq clamp bucket value is reset to its base value whenever
1649 * there are no more RUNNABLE tasks refcounting it.
1650 */
1651 if (likely(bucket->tasks))
1652 return;
1653
1654 rq_clamp = uclamp_rq_get(rq, clamp_id);
1655 /*
1656 * Defensive programming: this should never happen. If it happens,
1657 * e.g. due to future modification, warn and fixup the expected value.
1658 */
1659 SCHED_WARN_ON(bucket->value > rq_clamp);
1660 if (bucket->value >= rq_clamp) {
1661 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1662 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1663 }
1664 }
1665
1666 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1667 {
1668 enum uclamp_id clamp_id;
1669
1670 /*
1671 * Avoid any overhead until uclamp is actually used by the userspace.
1672 *
1673 * The condition is constructed such that a NOP is generated when
1674 * sched_uclamp_used is disabled.
1675 */
1676 if (!static_branch_unlikely(&sched_uclamp_used))
1677 return;
1678
1679 if (unlikely(!p->sched_class->uclamp_enabled))
1680 return;
1681
1682 for_each_clamp_id(clamp_id)
1683 uclamp_rq_inc_id(rq, p, clamp_id);
1684
1685 /* Reset clamp idle holding when there is one RUNNABLE task */
1686 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1687 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1688 }
1689
1690 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1691 {
1692 enum uclamp_id clamp_id;
1693
1694 /*
1695 * Avoid any overhead until uclamp is actually used by the userspace.
1696 *
1697 * The condition is constructed such that a NOP is generated when
1698 * sched_uclamp_used is disabled.
1699 */
1700 if (!static_branch_unlikely(&sched_uclamp_used))
1701 return;
1702
1703 if (unlikely(!p->sched_class->uclamp_enabled))
1704 return;
1705
1706 for_each_clamp_id(clamp_id)
1707 uclamp_rq_dec_id(rq, p, clamp_id);
1708 }
1709
1710 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1711 enum uclamp_id clamp_id)
1712 {
1713 if (!p->uclamp[clamp_id].active)
1714 return;
1715
1716 uclamp_rq_dec_id(rq, p, clamp_id);
1717 uclamp_rq_inc_id(rq, p, clamp_id);
1718
1719 /*
1720 * Make sure to clear the idle flag if we've transiently reached 0
1721 * active tasks on rq.
1722 */
1723 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1724 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1725 }
1726
1727 static inline void
1728 uclamp_update_active(struct task_struct *p)
1729 {
1730 enum uclamp_id clamp_id;
1731 struct rq_flags rf;
1732 struct rq *rq;
1733
1734 /*
1735 * Lock the task and the rq where the task is (or was) queued.
1736 *
1737 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1738 * price to pay to safely serialize util_{min,max} updates with
1739 * enqueues, dequeues and migration operations.
1740 * This is the same locking schema used by __set_cpus_allowed_ptr().
1741 */
1742 rq = task_rq_lock(p, &rf);
1743
1744 /*
1745 * Setting the clamp bucket is serialized by task_rq_lock().
1746 * If the task is not yet RUNNABLE and its task_struct is not
1747 * affecting a valid clamp bucket, the next time it's enqueued,
1748 * it will already see the updated clamp bucket value.
1749 */
1750 for_each_clamp_id(clamp_id)
1751 uclamp_rq_reinc_id(rq, p, clamp_id);
1752
1753 task_rq_unlock(rq, p, &rf);
1754 }
1755
1756 #ifdef CONFIG_UCLAMP_TASK_GROUP
1757 static inline void
1758 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1759 {
1760 struct css_task_iter it;
1761 struct task_struct *p;
1762
1763 css_task_iter_start(css, 0, &it);
1764 while ((p = css_task_iter_next(&it)))
1765 uclamp_update_active(p);
1766 css_task_iter_end(&it);
1767 }
1768
1769 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1770 #endif
1771
1772 #ifdef CONFIG_SYSCTL
1773 #ifdef CONFIG_UCLAMP_TASK
1774 #ifdef CONFIG_UCLAMP_TASK_GROUP
1775 static void uclamp_update_root_tg(void)
1776 {
1777 struct task_group *tg = &root_task_group;
1778
1779 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1780 sysctl_sched_uclamp_util_min, false);
1781 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1782 sysctl_sched_uclamp_util_max, false);
1783
1784 rcu_read_lock();
1785 cpu_util_update_eff(&root_task_group.css);
1786 rcu_read_unlock();
1787 }
1788 #else
1789 static void uclamp_update_root_tg(void) { }
1790 #endif
1791
1792 static void uclamp_sync_util_min_rt_default(void)
1793 {
1794 struct task_struct *g, *p;
1795
1796 /*
1797 * copy_process() sysctl_uclamp
1798 * uclamp_min_rt = X;
1799 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1800 * // link thread smp_mb__after_spinlock()
1801 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1802 * sched_post_fork() for_each_process_thread()
1803 * __uclamp_sync_rt() __uclamp_sync_rt()
1804 *
1805 * Ensures that either sched_post_fork() will observe the new
1806 * uclamp_min_rt or for_each_process_thread() will observe the new
1807 * task.
1808 */
1809 read_lock(&tasklist_lock);
1810 smp_mb__after_spinlock();
1811 read_unlock(&tasklist_lock);
1812
1813 rcu_read_lock();
1814 for_each_process_thread(g, p)
1815 uclamp_update_util_min_rt_default(p);
1816 rcu_read_unlock();
1817 }
1818
1819 static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
1820 void *buffer, size_t *lenp, loff_t *ppos)
1821 {
1822 bool update_root_tg = false;
1823 int old_min, old_max, old_min_rt;
1824 int result;
1825
1826 guard(mutex)(&uclamp_mutex);
1827
1828 old_min = sysctl_sched_uclamp_util_min;
1829 old_max = sysctl_sched_uclamp_util_max;
1830 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1831
1832 result = proc_dointvec(table, write, buffer, lenp, ppos);
1833 if (result)
1834 goto undo;
1835 if (!write)
1836 return 0;
1837
1838 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1839 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1840 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1841
1842 result = -EINVAL;
1843 goto undo;
1844 }
1845
1846 if (old_min != sysctl_sched_uclamp_util_min) {
1847 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1848 sysctl_sched_uclamp_util_min, false);
1849 update_root_tg = true;
1850 }
1851 if (old_max != sysctl_sched_uclamp_util_max) {
1852 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1853 sysctl_sched_uclamp_util_max, false);
1854 update_root_tg = true;
1855 }
1856
1857 if (update_root_tg) {
1858 static_branch_enable(&sched_uclamp_used);
1859 uclamp_update_root_tg();
1860 }
1861
1862 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1863 static_branch_enable(&sched_uclamp_used);
1864 uclamp_sync_util_min_rt_default();
1865 }
1866
1867 /*
1868 * We update all RUNNABLE tasks only when task groups are in use.
1869 * Otherwise, keep it simple and do just a lazy update at each next
1870 * task enqueue time.
1871 */
1872 return 0;
1873
1874 undo:
1875 sysctl_sched_uclamp_util_min = old_min;
1876 sysctl_sched_uclamp_util_max = old_max;
1877 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1878 return result;
1879 }
1880 #endif
1881 #endif
1882
1883 static int uclamp_validate(struct task_struct *p,
1884 const struct sched_attr *attr)
1885 {
1886 int util_min = p->uclamp_req[UCLAMP_MIN].value;
1887 int util_max = p->uclamp_req[UCLAMP_MAX].value;
1888
1889 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1890 util_min = attr->sched_util_min;
1891
1892 if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
1893 return -EINVAL;
1894 }
1895
1896 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1897 util_max = attr->sched_util_max;
1898
1899 if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
1900 return -EINVAL;
1901 }
1902
1903 if (util_min != -1 && util_max != -1 && util_min > util_max)
1904 return -EINVAL;
1905
1906 /*
1907 * We have valid uclamp attributes; make sure uclamp is enabled.
1908 *
1909 * We need to do that here, because enabling static branches is a
1910 * blocking operation which obviously cannot be done while holding
1911 * scheduler locks.
1912 */
1913 static_branch_enable(&sched_uclamp_used);
1914
1915 return 0;
1916 }
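/*
 * Worked example of the range checks above, assuming the usual
 * SCHED_CAPACITY_SCALE of 1024 and the signed local variables used here:
 *
 *	util_min == 1025  ->  1026 > 1025  ->  -EINVAL
 *	util_min == 1024  ->  1025 > 1025  ->  accepted (maximum boost)
 *	util_min == -1    ->     0 > 1025  ->  accepted ("leave unchanged")
 *
 * i.e. the "+ 1" on both sides of the comparison rejects anything larger
 * than SCHED_CAPACITY_SCALE while still letting the -1 sentinel through.
 */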
1917
1918 static bool uclamp_reset(const struct sched_attr *attr,
1919 enum uclamp_id clamp_id,
1920 struct uclamp_se *uc_se)
1921 {
1922 /* Reset on sched class change for a non user-defined clamp value. */
1923 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
1924 !uc_se->user_defined)
1925 return true;
1926
1927 /* Reset on sched_util_{min,max} == -1. */
1928 if (clamp_id == UCLAMP_MIN &&
1929 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1930 attr->sched_util_min == -1) {
1931 return true;
1932 }
1933
1934 if (clamp_id == UCLAMP_MAX &&
1935 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1936 attr->sched_util_max == -1) {
1937 return true;
1938 }
1939
1940 return false;
1941 }
1942
1943 static void __setscheduler_uclamp(struct task_struct *p,
1944 const struct sched_attr *attr)
1945 {
1946 enum uclamp_id clamp_id;
1947
1948 for_each_clamp_id(clamp_id) {
1949 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1950 unsigned int value;
1951
1952 if (!uclamp_reset(attr, clamp_id, uc_se))
1953 continue;
1954
1955 /*
1956 * RT tasks by default have a 100% boost value that can be
1957 * modified at runtime.
1958 */
1959 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1960 value = sysctl_sched_uclamp_util_min_rt_default;
1961 else
1962 value = uclamp_none(clamp_id);
1963
1964 uclamp_se_set(uc_se, value, false);
1965
1966 }
1967
1968 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1969 return;
1970
1971 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1972 attr->sched_util_min != -1) {
1973 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1974 attr->sched_util_min, true);
1975 }
1976
1977 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1978 attr->sched_util_max != -1) {
1979 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1980 attr->sched_util_max, true);
1981 }
1982 }
1983
1984 static void uclamp_fork(struct task_struct *p)
1985 {
1986 enum uclamp_id clamp_id;
1987
1988 /*
1989 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1990 * as the task is still at its early fork stages.
1991 */
1992 for_each_clamp_id(clamp_id)
1993 p->uclamp[clamp_id].active = false;
1994
1995 if (likely(!p->sched_reset_on_fork))
1996 return;
1997
1998 for_each_clamp_id(clamp_id) {
1999 uclamp_se_set(&p->uclamp_req[clamp_id],
2000 uclamp_none(clamp_id), false);
2001 }
2002 }
2003
2004 static void uclamp_post_fork(struct task_struct *p)
2005 {
2006 uclamp_update_util_min_rt_default(p);
2007 }
2008
2009 static void __init init_uclamp_rq(struct rq *rq)
2010 {
2011 enum uclamp_id clamp_id;
2012 struct uclamp_rq *uc_rq = rq->uclamp;
2013
2014 for_each_clamp_id(clamp_id) {
2015 uc_rq[clamp_id] = (struct uclamp_rq) {
2016 .value = uclamp_none(clamp_id)
2017 };
2018 }
2019
2020 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2021 }
2022
2023 static void __init init_uclamp(void)
2024 {
2025 struct uclamp_se uc_max = {};
2026 enum uclamp_id clamp_id;
2027 int cpu;
2028
2029 for_each_possible_cpu(cpu)
2030 init_uclamp_rq(cpu_rq(cpu));
2031
2032 for_each_clamp_id(clamp_id) {
2033 uclamp_se_set(&init_task.uclamp_req[clamp_id],
2034 uclamp_none(clamp_id), false);
2035 }
2036
2037 /* System defaults allow max clamp values for both indexes */
2038 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2039 for_each_clamp_id(clamp_id) {
2040 uclamp_default[clamp_id] = uc_max;
2041 #ifdef CONFIG_UCLAMP_TASK_GROUP
2042 root_task_group.uclamp_req[clamp_id] = uc_max;
2043 root_task_group.uclamp[clamp_id] = uc_max;
2044 #endif
2045 }
2046 }
2047
2048 #else /* CONFIG_UCLAMP_TASK */
2049 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
2050 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2051 static inline int uclamp_validate(struct task_struct *p,
2052 const struct sched_attr *attr)
2053 {
2054 return -EOPNOTSUPP;
2055 }
2056 static void __setscheduler_uclamp(struct task_struct *p,
2057 const struct sched_attr *attr) { }
2058 static inline void uclamp_fork(struct task_struct *p) { }
2059 static inline void uclamp_post_fork(struct task_struct *p) { }
2060 static inline void init_uclamp(void) { }
2061 #endif /* CONFIG_UCLAMP_TASK */
2062
2063 bool sched_task_on_rq(struct task_struct *p)
2064 {
2065 return task_on_rq_queued(p);
2066 }
2067
2068 unsigned long get_wchan(struct task_struct *p)
2069 {
2070 unsigned long ip = 0;
2071 unsigned int state;
2072
2073 if (!p || p == current)
2074 return 0;
2075
2076 /* Only get wchan if task is blocked and we can keep it that way. */
2077 raw_spin_lock_irq(&p->pi_lock);
2078 state = READ_ONCE(p->__state);
2079 smp_rmb(); /* see try_to_wake_up() */
2080 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2081 ip = __get_wchan(p);
2082 raw_spin_unlock_irq(&p->pi_lock);
2083
2084 return ip;
2085 }
2086
2087 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2088 {
2089 if (!(flags & ENQUEUE_NOCLOCK))
2090 update_rq_clock(rq);
2091
2092 if (!(flags & ENQUEUE_RESTORE)) {
2093 sched_info_enqueue(rq, p);
2094 psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
2095 }
2096
2097 uclamp_rq_inc(rq, p);
2098 p->sched_class->enqueue_task(rq, p, flags);
2099
2100 if (sched_core_enabled(rq))
2101 sched_core_enqueue(rq, p);
2102 }
2103
2104 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2105 {
2106 if (sched_core_enabled(rq))
2107 sched_core_dequeue(rq, p, flags);
2108
2109 if (!(flags & DEQUEUE_NOCLOCK))
2110 update_rq_clock(rq);
2111
2112 if (!(flags & DEQUEUE_SAVE)) {
2113 sched_info_dequeue(rq, p);
2114 psi_dequeue(p, flags & DEQUEUE_SLEEP);
2115 }
2116
2117 uclamp_rq_dec(rq, p);
2118 p->sched_class->dequeue_task(rq, p, flags);
2119 }
2120
2121 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2122 {
2123 if (task_on_rq_migrating(p))
2124 flags |= ENQUEUE_MIGRATED;
2125 if (flags & ENQUEUE_MIGRATED)
2126 sched_mm_cid_migrate_to(rq, p);
2127
2128 enqueue_task(rq, p, flags);
2129
2130 p->on_rq = TASK_ON_RQ_QUEUED;
2131 }
2132
2133 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2134 {
2135 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
2136
2137 dequeue_task(rq, p, flags);
2138 }
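/*
 * Illustrative pairing of the two helpers above (a sketch, not a new code
 * path): a task that blocks is deactivated from __schedule() and re-activated
 * on wakeup, roughly:
 *
 *	// prev is going to sleep, rq->lock held, clock already updated:
 *	deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
 *
 *	// try_to_wake_up() path, on the chosen CPU's rq:
 *	activate_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
 *	wakeup_preempt(rq, p, wake_flags);
 *
 * The *_NOCLOCK flags assume the caller already ran update_rq_clock().
 */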
2139
2140 static inline int __normal_prio(int policy, int rt_prio, int nice)
2141 {
2142 int prio;
2143
2144 if (dl_policy(policy))
2145 prio = MAX_DL_PRIO - 1;
2146 else if (rt_policy(policy))
2147 prio = MAX_RT_PRIO - 1 - rt_prio;
2148 else
2149 prio = NICE_TO_PRIO(nice);
2150
2151 return prio;
2152 }
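/*
 * Worked examples, assuming the usual priority layout (MAX_DL_PRIO == 0,
 * MAX_RT_PRIO == 100, NICE_TO_PRIO(n) == 120 + n):
 *
 *	SCHED_DEADLINE			->  prio = -1
 *	SCHED_FIFO, rt_prio == 1	->  prio = 98
 *	SCHED_FIFO, rt_prio == 99	->  prio = 0
 *	SCHED_NORMAL, nice == -20	->  prio = 100
 *	SCHED_NORMAL, nice == 0		->  prio = 120
 *	SCHED_NORMAL, nice == 19	->  prio = 139
 *
 * Lower values mean higher scheduling priority.
 */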
2153
2154 /*
2155 * Calculate the expected normal priority: i.e. priority
2156 * without taking RT-inheritance into account. Might be
2157 * boosted by interactivity modifiers. Changes upon fork,
2158 * setprio syscalls, and whenever the interactivity
2159 * estimator recalculates.
2160 */
2161 static inline int normal_prio(struct task_struct *p)
2162 {
2163 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
2164 }
2165
2166 /*
2167 * Calculate the current priority, i.e. the priority
2168 * taken into account by the scheduler. This value might
2169 * be boosted by RT tasks, or might be boosted by
2170 * interactivity modifiers. Will be RT if the task got
2171 * RT-boosted. If not then it returns p->normal_prio.
2172 */
2173 static int effective_prio(struct task_struct *p)
2174 {
2175 p->normal_prio = normal_prio(p);
2176 /*
2177 * If we are RT tasks or we were boosted to RT priority,
2178 * keep the priority unchanged. Otherwise, update priority
2179 * to the normal priority:
2180 */
2181 if (!rt_prio(p->prio))
2182 return p->normal_prio;
2183 return p->prio;
2184 }
2185
2186 /**
2187 * task_curr - is this task currently executing on a CPU?
2188 * @p: the task in question.
2189 *
2190 * Return: 1 if the task is currently executing. 0 otherwise.
2191 */
2192 inline int task_curr(const struct task_struct *p)
2193 {
2194 return cpu_curr(task_cpu(p)) == p;
2195 }
2196
2197 /*
2198 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2199 * use the balance_callback list if you want balancing.
2200 *
2201 * this means any call to check_class_changed() must be followed by a call to
2202 * balance_callback().
2203 */
2204 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2205 const struct sched_class *prev_class,
2206 int oldprio)
2207 {
2208 if (prev_class != p->sched_class) {
2209 if (prev_class->switched_from)
2210 prev_class->switched_from(rq, p);
2211
2212 p->sched_class->switched_to(rq, p);
2213 } else if (oldprio != p->prio || dl_task(p))
2214 p->sched_class->prio_changed(rq, p, oldprio);
2215 }
2216
2217 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2218 {
2219 if (p->sched_class == rq->curr->sched_class)
2220 rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2221 else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2222 resched_curr(rq);
2223
2224 /*
2225 * A queue event has occurred, and we're going to schedule. In
2226 * this case, we can save a useless back to back clock update.
2227 */
2228 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2229 rq_clock_skip_update(rq);
2230 }
2231
2232 static __always_inline
2233 int __task_state_match(struct task_struct *p, unsigned int state)
2234 {
2235 if (READ_ONCE(p->__state) & state)
2236 return 1;
2237
2238 #ifdef CONFIG_PREEMPT_RT
2239 if (READ_ONCE(p->saved_state) & state)
2240 return -1;
2241 #endif
2242 return 0;
2243 }
2244
2245 static __always_inline
2246 int task_state_match(struct task_struct *p, unsigned int state)
2247 {
2248 #ifdef CONFIG_PREEMPT_RT
2249 int match;
2250
2251 /*
2252 * Serialize against current_save_and_set_rtlock_wait_state() and
2253 * current_restore_rtlock_saved_state().
2254 */
2255 raw_spin_lock_irq(&p->pi_lock);
2256 match = __task_state_match(p, state);
2257 raw_spin_unlock_irq(&p->pi_lock);
2258
2259 return match;
2260 #else
2261 return __task_state_match(p, state);
2262 #endif
2263 }
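/*
 * Callers treat the result as a tri-state; an illustrative sketch:
 *
 *	match = task_state_match(p, state);
 *	if (!match)
 *		;	// neither state matched
 *	else if (match > 0)
 *		;	// p->__state matched
 *	else
 *		;	// PREEMPT_RT only: matched via p->saved_state
 *
 * wait_task_inactive() below relies on the negative case to keep treating a
 * task sleeping on an rtlock as if it were still queued.
 */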
2264
2265 /*
2266 * wait_task_inactive - wait for a thread to unschedule.
2267 *
2268 * Wait for the thread to block in any of the states set in @match_state.
2269 * If it changes, i.e. @p might have woken up, then return zero. When we
2270 * succeed in waiting for @p to be off its CPU, we return a positive number
2271 * (its total switch count). If a second call a short while later returns the
2272 * same number, the caller can be sure that @p has remained unscheduled the
2273 * whole time.
2274 *
2275 * The caller must ensure that the task *will* unschedule sometime soon,
2276 * else this function might spin for a *long* time. This function can't
2277 * be called with interrupts off, or it may introduce deadlock with
2278 * smp_call_function() if an IPI is sent by the same process we are
2279 * waiting to become inactive.
2280 */
2281 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2282 {
2283 int running, queued, match;
2284 struct rq_flags rf;
2285 unsigned long ncsw;
2286 struct rq *rq;
2287
2288 for (;;) {
2289 /*
2290 * We do the initial early heuristics without holding
2291 * any task-queue locks at all. We'll only try to get
2292 * the runqueue lock when things look like they will
2293 * work out!
2294 */
2295 rq = task_rq(p);
2296
2297 /*
2298 * If the task is actively running on another CPU
2299 * still, just relax and busy-wait without holding
2300 * any locks.
2301 *
2302 * NOTE! Since we don't hold any locks, it's not
2303 * even sure that "rq" stays as the right runqueue!
2304 * But we don't care, since "task_on_cpu()" will
2305 * return false if the runqueue has changed and p
2306 * is actually now running somewhere else!
2307 */
2308 while (task_on_cpu(rq, p)) {
2309 if (!task_state_match(p, match_state))
2310 return 0;
2311 cpu_relax();
2312 }
2313
2314 /*
2315 * Ok, time to look more closely! We need the rq
2316 * lock now, to be *sure*. If we're wrong, we'll
2317 * just go back and repeat.
2318 */
2319 rq = task_rq_lock(p, &rf);
2320 trace_sched_wait_task(p);
2321 running = task_on_cpu(rq, p);
2322 queued = task_on_rq_queued(p);
2323 ncsw = 0;
2324 if ((match = __task_state_match(p, match_state))) {
2325 /*
2326 * When matching on p->saved_state, consider this task
2327 * still queued so it will wait.
2328 */
2329 if (match < 0)
2330 queued = 1;
2331 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2332 }
2333 task_rq_unlock(rq, p, &rf);
2334
2335 /*
2336 * If it changed from the expected state, bail out now.
2337 */
2338 if (unlikely(!ncsw))
2339 break;
2340
2341 /*
2342 * Was it really running after all now that we
2343 * checked with the proper locks actually held?
2344 *
2345 * Oops. Go back and try again..
2346 */
2347 if (unlikely(running)) {
2348 cpu_relax();
2349 continue;
2350 }
2351
2352 /*
2353 * It's not enough that it's not actively running,
2354 * it must be off the runqueue _entirely_, and not
2355 * preempted!
2356 *
2357 * So if it was still runnable (but just not actively
2358 * running right now), it's preempted, and we should
2359 * yield - it could be a while.
2360 */
2361 if (unlikely(queued)) {
2362 ktime_t to = NSEC_PER_SEC / HZ;
2363
2364 set_current_state(TASK_UNINTERRUPTIBLE);
2365 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2366 continue;
2367 }
2368
2369 /*
2370 * Ahh, all good. It wasn't running, and it wasn't
2371 * runnable, which means that it will never become
2372 * running in the future either. We're all done!
2373 */
2374 break;
2375 }
2376
2377 return ncsw;
2378 }
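/*
 * Usage sketch for the double-sample pattern described in the comment above
 * (illustrative only):
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
 *	if (!ncsw)
 *		goto bail;	// @p left the state instead of blocking
 *	...			// inspect @p while it is off its CPU
 *	if (wait_task_inactive(p, TASK_UNINTERRUPTIBLE) != ncsw)
 *		goto retry;	// @p ran in between; the snapshot is stale
 */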
2379
2380 #ifdef CONFIG_SMP
2381
2382 static void
2383 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2384
2385 static int __set_cpus_allowed_ptr(struct task_struct *p,
2386 struct affinity_context *ctx);
2387
2388 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2389 {
2390 struct affinity_context ac = {
2391 .new_mask = cpumask_of(rq->cpu),
2392 .flags = SCA_MIGRATE_DISABLE,
2393 };
2394
2395 if (likely(!p->migration_disabled))
2396 return;
2397
2398 if (p->cpus_ptr != &p->cpus_mask)
2399 return;
2400
2401 /*
2402 * Violates locking rules! see comment in __do_set_cpus_allowed().
2403 */
2404 __do_set_cpus_allowed(p, &ac);
2405 }
2406
2407 void migrate_disable(void)
2408 {
2409 struct task_struct *p = current;
2410
2411 if (p->migration_disabled) {
2412 p->migration_disabled++;
2413 return;
2414 }
2415
2416 preempt_disable();
2417 this_rq()->nr_pinned++;
2418 p->migration_disabled = 1;
2419 preempt_enable();
2420 }
2421 EXPORT_SYMBOL_GPL(migrate_disable);
2422
2423 void migrate_enable(void)
2424 {
2425 struct task_struct *p = current;
2426 struct affinity_context ac = {
2427 .new_mask = &p->cpus_mask,
2428 .flags = SCA_MIGRATE_ENABLE,
2429 };
2430
2431 if (p->migration_disabled > 1) {
2432 p->migration_disabled--;
2433 return;
2434 }
2435
2436 if (WARN_ON_ONCE(!p->migration_disabled))
2437 return;
2438
2439 /*
2440 * Ensure stop_task runs either before or after this, and that
2441 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2442 */
2443 preempt_disable();
2444 if (p->cpus_ptr != &p->cpus_mask)
2445 __set_cpus_allowed_ptr(p, &ac);
2446 /*
2447 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2448 * regular cpus_mask, otherwise things that race (eg.
2449 * select_fallback_rq) get confused.
2450 */
2451 barrier();
2452 p->migration_disabled = 0;
2453 this_rq()->nr_pinned--;
2454 preempt_enable();
2455 }
2456 EXPORT_SYMBOL_GPL(migrate_enable);
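/*
 * Usage sketch (illustrative): a migrate_disable() section pins the task to
 * its current CPU but, unlike preempt_disable(), leaves preemption enabled
 * and may nest:
 *
 *	migrate_disable();
 *	ptr = this_cpu_ptr(&some_percpu_thing);	// CPU cannot change under us,
 *	...					// but we may still be preempted
 *	migrate_enable();
 *
 * ("some_percpu_thing" is a placeholder.) The outermost migrate_enable()
 * restores a deferred affinity change via SCA_MIGRATE_ENABLE, as above.
 */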
2457
2458 static inline bool rq_has_pinned_tasks(struct rq *rq)
2459 {
2460 return rq->nr_pinned;
2461 }
2462
2463 /*
2464 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2465 * __set_cpus_allowed_ptr() and select_fallback_rq().
2466 */
2467 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2468 {
2469 /* When not in the task's cpumask, no point in looking further. */
2470 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2471 return false;
2472
2473 /* migrate_disabled() must be allowed to finish. */
2474 if (is_migration_disabled(p))
2475 return cpu_online(cpu);
2476
2477 /* Non kernel threads are not allowed during either online or offline. */
2478 if (!(p->flags & PF_KTHREAD))
2479 return cpu_active(cpu) && task_cpu_possible(cpu, p);
2480
2481 /* KTHREAD_IS_PER_CPU is always allowed. */
2482 if (kthread_is_per_cpu(p))
2483 return cpu_online(cpu);
2484
2485 /* Regular kernel threads don't get to stay during offline. */
2486 if (cpu_dying(cpu))
2487 return false;
2488
2489 /* But are allowed during online. */
2490 return cpu_online(cpu);
2491 }
2492
2493 /*
2494 * This is how migration works:
2495 *
2496 * 1) we invoke migration_cpu_stop() on the target CPU using
2497 * stop_one_cpu().
2498 * 2) stopper starts to run (implicitly forcing the migrated thread
2499 * off the CPU)
2500 * 3) it checks whether the migrated task is still in the wrong runqueue.
2501 * 4) if it's in the wrong runqueue then the migration thread removes
2502 * it and puts it into the right queue.
2503 * 5) stopper completes and stop_one_cpu() returns and the migration
2504 * is done.
2505 */
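/*
 * In code, the sequence above amounts to packaging a migration_arg (defined
 * below) and handing it to the stopper. A minimal sketch of what a caller
 * such as the exec-balance path does (illustrative, call sites differ in
 * detail):
 *
 *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
 *
 *	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 *
 * Affinity changes instead go through affine_move_task() further down, which
 * adds a completion and refcount so concurrent requests can wait.
 */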
2506
2507 /*
2508 * move_queued_task - move a queued task to new rq.
2509 *
2510 * Returns (locked) new rq. Old rq's lock is released.
2511 */
2512 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2513 struct task_struct *p, int new_cpu)
2514 {
2515 lockdep_assert_rq_held(rq);
2516
2517 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2518 set_task_cpu(p, new_cpu);
2519 rq_unlock(rq, rf);
2520
2521 rq = cpu_rq(new_cpu);
2522
2523 rq_lock(rq, rf);
2524 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2525 activate_task(rq, p, 0);
2526 wakeup_preempt(rq, p, 0);
2527
2528 return rq;
2529 }
2530
2531 struct migration_arg {
2532 struct task_struct *task;
2533 int dest_cpu;
2534 struct set_affinity_pending *pending;
2535 };
2536
2537 /*
2538 * @refs: number of wait_for_completion()
2539 * @stop_pending: is @stop_work in use
2540 */
2541 struct set_affinity_pending {
2542 refcount_t refs;
2543 unsigned int stop_pending;
2544 struct completion done;
2545 struct cpu_stop_work stop_work;
2546 struct migration_arg arg;
2547 };
2548
2549 /*
2550 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2551 * this because either it can't run here any more (set_cpus_allowed()
2552 * away from this CPU, or CPU going down), or because we're
2553 * attempting to rebalance this task on exec (sched_exec).
2554 *
2555 * So we race with normal scheduler movements, but that's OK, as long
2556 * as the task is no longer on this CPU.
2557 */
2558 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2559 struct task_struct *p, int dest_cpu)
2560 {
2561 /* Affinity changed (again). */
2562 if (!is_cpu_allowed(p, dest_cpu))
2563 return rq;
2564
2565 rq = move_queued_task(rq, rf, p, dest_cpu);
2566
2567 return rq;
2568 }
2569
2570 /*
2571 * migration_cpu_stop - this will be executed by a highprio stopper thread
2572 * and performs thread migration by bumping thread off CPU then
2573 * 'pushing' onto another runqueue.
2574 */
2575 static int migration_cpu_stop(void *data)
2576 {
2577 struct migration_arg *arg = data;
2578 struct set_affinity_pending *pending = arg->pending;
2579 struct task_struct *p = arg->task;
2580 struct rq *rq = this_rq();
2581 bool complete = false;
2582 struct rq_flags rf;
2583
2584 /*
2585 * The original target CPU might have gone down and we might
2586 * be on another CPU but it doesn't matter.
2587 */
2588 local_irq_save(rf.flags);
2589 /*
2590 * We need to explicitly wake pending tasks before running
2591 * __migrate_task() such that we will not miss enforcing cpus_ptr
2592 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2593 */
2594 flush_smp_call_function_queue();
2595
2596 raw_spin_lock(&p->pi_lock);
2597 rq_lock(rq, &rf);
2598
2599 /*
2600 * If we were passed a pending, then ->stop_pending was set, thus
2601 * p->migration_pending must have remained stable.
2602 */
2603 WARN_ON_ONCE(pending && pending != p->migration_pending);
2604
2605 /*
2606 * If task_rq(p) != rq, it cannot be migrated here, because we're
2607 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2608 * we're holding p->pi_lock.
2609 */
2610 if (task_rq(p) == rq) {
2611 if (is_migration_disabled(p))
2612 goto out;
2613
2614 if (pending) {
2615 p->migration_pending = NULL;
2616 complete = true;
2617
2618 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2619 goto out;
2620 }
2621
2622 if (task_on_rq_queued(p)) {
2623 update_rq_clock(rq);
2624 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2625 } else {
2626 p->wake_cpu = arg->dest_cpu;
2627 }
2628
2629 /*
2630 * XXX __migrate_task() can fail, at which point we might end
2631 * up running on a dodgy CPU, AFAICT this can only happen
2632 * during CPU hotplug, at which point we'll get pushed out
2633 * anyway, so it's probably not a big deal.
2634 */
2635
2636 } else if (pending) {
2637 /*
2638 * This happens when we get migrated between migrate_enable()'s
2639 * preempt_enable() and scheduling the stopper task. At that
2640 * point we're a regular task again and not current anymore.
2641 *
2642 * A !PREEMPT kernel has a giant hole here, which makes it far
2643 * more likely.
2644 */
2645
2646 /*
2647 * The task moved before the stopper got to run. We're holding
2648 * ->pi_lock, so the allowed mask is stable - if it got
2649 * somewhere allowed, we're done.
2650 */
2651 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2652 p->migration_pending = NULL;
2653 complete = true;
2654 goto out;
2655 }
2656
2657 /*
2658 * When migrate_enable() hits a rq mis-match we can't reliably
2659 * determine is_migration_disabled() and so have to chase after
2660 * it.
2661 */
2662 WARN_ON_ONCE(!pending->stop_pending);
2663 preempt_disable();
2664 task_rq_unlock(rq, p, &rf);
2665 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2666 &pending->arg, &pending->stop_work);
2667 preempt_enable();
2668 return 0;
2669 }
2670 out:
2671 if (pending)
2672 pending->stop_pending = false;
2673 task_rq_unlock(rq, p, &rf);
2674
2675 if (complete)
2676 complete_all(&pending->done);
2677
2678 return 0;
2679 }
2680
2681 int push_cpu_stop(void *arg)
2682 {
2683 struct rq *lowest_rq = NULL, *rq = this_rq();
2684 struct task_struct *p = arg;
2685
2686 raw_spin_lock_irq(&p->pi_lock);
2687 raw_spin_rq_lock(rq);
2688
2689 if (task_rq(p) != rq)
2690 goto out_unlock;
2691
2692 if (is_migration_disabled(p)) {
2693 p->migration_flags |= MDF_PUSH;
2694 goto out_unlock;
2695 }
2696
2697 p->migration_flags &= ~MDF_PUSH;
2698
2699 if (p->sched_class->find_lock_rq)
2700 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2701
2702 if (!lowest_rq)
2703 goto out_unlock;
2704
2705 // XXX validate p is still the highest prio task
2706 if (task_rq(p) == rq) {
2707 deactivate_task(rq, p, 0);
2708 set_task_cpu(p, lowest_rq->cpu);
2709 activate_task(lowest_rq, p, 0);
2710 resched_curr(lowest_rq);
2711 }
2712
2713 double_unlock_balance(rq, lowest_rq);
2714
2715 out_unlock:
2716 rq->push_busy = false;
2717 raw_spin_rq_unlock(rq);
2718 raw_spin_unlock_irq(&p->pi_lock);
2719
2720 put_task_struct(p);
2721 return 0;
2722 }
2723
2724 /*
2725 * sched_class::set_cpus_allowed must do the below, but is not required to
2726 * actually call this function.
2727 */
2728 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2729 {
2730 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2731 p->cpus_ptr = ctx->new_mask;
2732 return;
2733 }
2734
2735 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2736 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2737
2738 /*
2739 * Swap in a new user_cpus_ptr if SCA_USER flag set
2740 */
2741 if (ctx->flags & SCA_USER)
2742 swap(p->user_cpus_ptr, ctx->user_mask);
2743 }
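/*
 * A sched_class that needs extra bookkeeping on affinity changes typically
 * wraps the helper above; a minimal sketch of such a hook (illustrative,
 * not an existing class):
 *
 *	static void set_cpus_allowed_example(struct task_struct *p,
 *					     struct affinity_context *ctx)
 *	{
 *		// class-specific work first (e.g. per-rq accounting) ...
 *		set_cpus_allowed_common(p, ctx);
 *	}
 */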
2744
2745 static void
2746 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2747 {
2748 struct rq *rq = task_rq(p);
2749 bool queued, running;
2750
2751 /*
2752 * This here violates the locking rules for affinity, since we're only
2753 * supposed to change these variables while holding both rq->lock and
2754 * p->pi_lock.
2755 *
2756 * HOWEVER, it magically works, because ttwu() is the only code that
2757 * accesses these variables under p->pi_lock and only does so after
2758 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2759 * before finish_task().
2760 *
2761 * XXX do further audits, this smells like something putrid.
2762 */
2763 if (ctx->flags & SCA_MIGRATE_DISABLE)
2764 SCHED_WARN_ON(!p->on_cpu);
2765 else
2766 lockdep_assert_held(&p->pi_lock);
2767
2768 queued = task_on_rq_queued(p);
2769 running = task_current(rq, p);
2770
2771 if (queued) {
2772 /*
2773 * Because __kthread_bind() calls this on blocked tasks without
2774 * holding rq->lock.
2775 */
2776 lockdep_assert_rq_held(rq);
2777 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2778 }
2779 if (running)
2780 put_prev_task(rq, p);
2781
2782 p->sched_class->set_cpus_allowed(p, ctx);
2783
2784 if (queued)
2785 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2786 if (running)
2787 set_next_task(rq, p);
2788 }
2789
2790 /*
2791 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2792 * affinity (if any) should be destroyed too.
2793 */
2794 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2795 {
2796 struct affinity_context ac = {
2797 .new_mask = new_mask,
2798 .user_mask = NULL,
2799 .flags = SCA_USER, /* clear the user requested mask */
2800 };
2801 union cpumask_rcuhead {
2802 cpumask_t cpumask;
2803 struct rcu_head rcu;
2804 };
2805
2806 __do_set_cpus_allowed(p, &ac);
2807
2808 /*
2809 * Because this is called with p->pi_lock held, it is not possible
2810 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2811 * kfree_rcu().
2812 */
2813 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2814 }
2815
2816 static cpumask_t *alloc_user_cpus_ptr(int node)
2817 {
2818 /*
2819 * See do_set_cpus_allowed() above for the rcu_head usage.
2820 */
2821 int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
2822
2823 return kmalloc_node(size, GFP_KERNEL, node);
2824 }
2825
2826 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2827 int node)
2828 {
2829 cpumask_t *user_mask;
2830 unsigned long flags;
2831
2832 /*
2833 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2834 * may differ by now due to racing.
2835 */
2836 dst->user_cpus_ptr = NULL;
2837
2838 /*
2839 * This check is racy and losing the race is a valid situation.
2840 * It is not worth the extra overhead of taking the pi_lock on
2841 * every fork/clone.
2842 */
2843 if (data_race(!src->user_cpus_ptr))
2844 return 0;
2845
2846 user_mask = alloc_user_cpus_ptr(node);
2847 if (!user_mask)
2848 return -ENOMEM;
2849
2850 /*
2851 * Use pi_lock to protect content of user_cpus_ptr
2852 *
2853 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2854 * do_set_cpus_allowed().
2855 */
2856 raw_spin_lock_irqsave(&src->pi_lock, flags);
2857 if (src->user_cpus_ptr) {
2858 swap(dst->user_cpus_ptr, user_mask);
2859 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2860 }
2861 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2862
2863 if (unlikely(user_mask))
2864 kfree(user_mask);
2865
2866 return 0;
2867 }
2868
2869 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2870 {
2871 struct cpumask *user_mask = NULL;
2872
2873 swap(p->user_cpus_ptr, user_mask);
2874
2875 return user_mask;
2876 }
2877
2878 void release_user_cpus_ptr(struct task_struct *p)
2879 {
2880 kfree(clear_user_cpus_ptr(p));
2881 }
2882
2883 /*
2884 * This function is wildly self concurrent; here be dragons.
2885 *
2886 *
2887 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2888 * designated task is enqueued on an allowed CPU. If that task is currently
2889 * running, we have to kick it out using the CPU stopper.
2890 *
2891 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2892 * Consider:
2893 *
2894 * Initial conditions: P0->cpus_mask = [0, 1]
2895 *
2896 * P0@CPU0 P1
2897 *
2898 * migrate_disable();
2899 * <preempted>
2900 * set_cpus_allowed_ptr(P0, [1]);
2901 *
2902 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2903 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2904 * This means we need the following scheme:
2905 *
2906 * P0@CPU0 P1
2907 *
2908 * migrate_disable();
2909 * <preempted>
2910 * set_cpus_allowed_ptr(P0, [1]);
2911 * <blocks>
2912 * <resumes>
2913 * migrate_enable();
2914 * __set_cpus_allowed_ptr();
2915 * <wakes local stopper>
2916 * `--> <woken on migration completion>
2917 *
2918 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2919 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2920 * task p are serialized by p->pi_lock, which we can leverage: the one that
2921 * should come into effect at the end of the Migrate-Disable region is the last
2922 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2923 * but we still need to properly signal those waiting tasks at the appropriate
2924 * moment.
2925 *
2926 * This is implemented using struct set_affinity_pending. The first
2927 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2928 * setup an instance of that struct and install it on the targeted task_struct.
2929 * Any and all further callers will reuse that instance. Those then wait for
2930 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2931 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2932 *
2933 *
2934 * (1) In the cases covered above. There is one more where the completion is
2935 * signaled within affine_move_task() itself: when a subsequent affinity request
2936 * occurs after the stopper bailed out due to the targeted task still being
2937 * Migrate-Disable. Consider:
2938 *
2939 * Initial conditions: P0->cpus_mask = [0, 1]
2940 *
2941 * CPU0 P1 P2
2942 * <P0>
2943 * migrate_disable();
2944 * <preempted>
2945 * set_cpus_allowed_ptr(P0, [1]);
2946 * <blocks>
2947 * <migration/0>
2948 * migration_cpu_stop()
2949 * is_migration_disabled()
2950 * <bails>
2951 * set_cpus_allowed_ptr(P0, [0, 1]);
2952 * <signal completion>
2953 * <awakes>
2954 *
2955 * Note that the above is safe vs a concurrent migrate_enable(), as any
2956 * pending affinity completion is preceded by an uninstallation of
2957 * p->migration_pending done with p->pi_lock held.
2958 */
2959 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2960 int dest_cpu, unsigned int flags)
2961 __releases(rq->lock)
2962 __releases(p->pi_lock)
2963 {
2964 struct set_affinity_pending my_pending = { }, *pending = NULL;
2965 bool stop_pending, complete = false;
2966
2967 /* Can the task run on the task's current CPU? If so, we're done */
2968 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2969 struct task_struct *push_task = NULL;
2970
2971 if ((flags & SCA_MIGRATE_ENABLE) &&
2972 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2973 rq->push_busy = true;
2974 push_task = get_task_struct(p);
2975 }
2976
2977 /*
2978 * If there are pending waiters, but no pending stop_work,
2979 * then complete now.
2980 */
2981 pending = p->migration_pending;
2982 if (pending && !pending->stop_pending) {
2983 p->migration_pending = NULL;
2984 complete = true;
2985 }
2986
2987 preempt_disable();
2988 task_rq_unlock(rq, p, rf);
2989 if (push_task) {
2990 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2991 p, &rq->push_work);
2992 }
2993 preempt_enable();
2994
2995 if (complete)
2996 complete_all(&pending->done);
2997
2998 return 0;
2999 }
3000
3001 if (!(flags & SCA_MIGRATE_ENABLE)) {
3002 /* serialized by p->pi_lock */
3003 if (!p->migration_pending) {
3004 /* Install the request */
3005 refcount_set(&my_pending.refs, 1);
3006 init_completion(&my_pending.done);
3007 my_pending.arg = (struct migration_arg) {
3008 .task = p,
3009 .dest_cpu = dest_cpu,
3010 .pending = &my_pending,
3011 };
3012
3013 p->migration_pending = &my_pending;
3014 } else {
3015 pending = p->migration_pending;
3016 refcount_inc(&pending->refs);
3017 /*
3018 * Affinity has changed, but we've already installed a
3019 * pending. migration_cpu_stop() *must* see this, else
3020 * we risk a completion of the pending despite having a
3021 * task on a disallowed CPU.
3022 *
3023 * Serialized by p->pi_lock, so this is safe.
3024 */
3025 pending->arg.dest_cpu = dest_cpu;
3026 }
3027 }
3028 pending = p->migration_pending;
3029 /*
3030 * - !MIGRATE_ENABLE:
3031 * we'll have installed a pending if there wasn't one already.
3032 *
3033 * - MIGRATE_ENABLE:
3034 * we're here because the current CPU isn't matching anymore,
3035 * the only way that can happen is because of a concurrent
3036 * set_cpus_allowed_ptr() call, which should then still be
3037 * pending completion.
3038 *
3039 * Either way, we really should have a @pending here.
3040 */
3041 if (WARN_ON_ONCE(!pending)) {
3042 task_rq_unlock(rq, p, rf);
3043 return -EINVAL;
3044 }
3045
3046 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
3047 /*
3048 * MIGRATE_ENABLE gets here because 'p == current', but for
3049 * anything else we cannot do is_migration_disabled(), punt
3050 * and have the stopper function handle it all race-free.
3051 */
3052 stop_pending = pending->stop_pending;
3053 if (!stop_pending)
3054 pending->stop_pending = true;
3055
3056 if (flags & SCA_MIGRATE_ENABLE)
3057 p->migration_flags &= ~MDF_PUSH;
3058
3059 preempt_disable();
3060 task_rq_unlock(rq, p, rf);
3061 if (!stop_pending) {
3062 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3063 &pending->arg, &pending->stop_work);
3064 }
3065 preempt_enable();
3066
3067 if (flags & SCA_MIGRATE_ENABLE)
3068 return 0;
3069 } else {
3070
3071 if (!is_migration_disabled(p)) {
3072 if (task_on_rq_queued(p))
3073 rq = move_queued_task(rq, rf, p, dest_cpu);
3074
3075 if (!pending->stop_pending) {
3076 p->migration_pending = NULL;
3077 complete = true;
3078 }
3079 }
3080 task_rq_unlock(rq, p, rf);
3081
3082 if (complete)
3083 complete_all(&pending->done);
3084 }
3085
3086 wait_for_completion(&pending->done);
3087
3088 if (refcount_dec_and_test(&pending->refs))
3089 wake_up_var(&pending->refs); /* No UaF, just an address */
3090
3091 /*
3092 * Block the original owner of &pending until all subsequent callers
3093 * have seen the completion and decremented the refcount
3094 */
3095 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3096
3097 /* ARGH */
3098 WARN_ON_ONCE(my_pending.stop_pending);
3099
3100 return 0;
3101 }
3102
3103 /*
3104 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3105 */
3106 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3107 struct affinity_context *ctx,
3108 struct rq *rq,
3109 struct rq_flags *rf)
3110 __releases(rq->lock)
3111 __releases(p->pi_lock)
3112 {
3113 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3114 const struct cpumask *cpu_valid_mask = cpu_active_mask;
3115 bool kthread = p->flags & PF_KTHREAD;
3116 unsigned int dest_cpu;
3117 int ret = 0;
3118
3119 update_rq_clock(rq);
3120
3121 if (kthread || is_migration_disabled(p)) {
3122 /*
3123 * Kernel threads are allowed on online && !active CPUs,
3124 * however, during cpu-hot-unplug, even these might get pushed
3125 * away if not KTHREAD_IS_PER_CPU.
3126 *
3127 * Specifically, migration_disabled() tasks must not fail the
3128 * cpumask_any_and_distribute() pick below, esp. so on
3129 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3130 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3131 */
3132 cpu_valid_mask = cpu_online_mask;
3133 }
3134
3135 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3136 ret = -EINVAL;
3137 goto out;
3138 }
3139
3140 /*
3141 * Must re-check here, to close a race against __kthread_bind(),
3142 * sched_setaffinity() is not guaranteed to observe the flag.
3143 */
3144 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3145 ret = -EINVAL;
3146 goto out;
3147 }
3148
3149 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3150 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3151 if (ctx->flags & SCA_USER)
3152 swap(p->user_cpus_ptr, ctx->user_mask);
3153 goto out;
3154 }
3155
3156 if (WARN_ON_ONCE(p == current &&
3157 is_migration_disabled(p) &&
3158 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3159 ret = -EBUSY;
3160 goto out;
3161 }
3162 }
3163
3164 /*
3165 * Picking a ~random cpu helps in cases where we are changing affinity
3166 * for groups of tasks (ie. cpuset), so that load balancing is not
3167 * immediately required to distribute the tasks within their new mask.
3168 */
3169 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3170 if (dest_cpu >= nr_cpu_ids) {
3171 ret = -EINVAL;
3172 goto out;
3173 }
3174
3175 __do_set_cpus_allowed(p, ctx);
3176
3177 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3178
3179 out:
3180 task_rq_unlock(rq, p, rf);
3181
3182 return ret;
3183 }
3184
3185 /*
3186 * Change a given task's CPU affinity. Migrate the thread to a
3187 * proper CPU and schedule it away if the CPU it's executing on
3188 * is removed from the allowed bitmask.
3189 *
3190 * NOTE: the caller must have a valid reference to the task, the
3191 * task must not exit() & deallocate itself prematurely. The
3192 * call is not atomic; no spinlocks may be held.
3193 */
3194 static int __set_cpus_allowed_ptr(struct task_struct *p,
3195 struct affinity_context *ctx)
3196 {
3197 struct rq_flags rf;
3198 struct rq *rq;
3199
3200 rq = task_rq_lock(p, &rf);
3201 /*
3202 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3203 * flags are set.
3204 */
3205 if (p->user_cpus_ptr &&
3206 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3207 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3208 ctx->new_mask = rq->scratch_mask;
3209
3210 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3211 }
3212
3213 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3214 {
3215 struct affinity_context ac = {
3216 .new_mask = new_mask,
3217 .flags = 0,
3218 };
3219
3220 return __set_cpus_allowed_ptr(p, &ac);
3221 }
3222 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
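/*
 * Usage sketch for the exported helper (illustrative; "target_cpu" is a
 * placeholder): a kernel thread that prefers to run on one CPU, but can
 * tolerate failure, may simply do:
 *
 *	if (set_cpus_allowed_ptr(current, cpumask_of(target_cpu)))
 *		pr_warn("could not move to CPU%d\n", target_cpu);
 *
 * The call blocks until the task is enqueued on an allowed CPU, migrating it
 * away first if it is currently running somewhere now-disallowed.
 */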
3223
3224 /*
3225 * Change a given task's CPU affinity to the intersection of its current
3226 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3227 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3228 * affinity or use cpu_online_mask instead.
3229 *
3230 * If the resulting mask is empty, leave the affinity unchanged and return
3231 * -EINVAL.
3232 */
3233 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3234 struct cpumask *new_mask,
3235 const struct cpumask *subset_mask)
3236 {
3237 struct affinity_context ac = {
3238 .new_mask = new_mask,
3239 .flags = 0,
3240 };
3241 struct rq_flags rf;
3242 struct rq *rq;
3243 int err;
3244
3245 rq = task_rq_lock(p, &rf);
3246
3247 /*
3248 * Forcefully restricting the affinity of a deadline task is
3249 * likely to cause problems, so fail and noisily override the
3250 * mask entirely.
3251 */
3252 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3253 err = -EPERM;
3254 goto err_unlock;
3255 }
3256
3257 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3258 err = -EINVAL;
3259 goto err_unlock;
3260 }
3261
3262 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3263
3264 err_unlock:
3265 task_rq_unlock(rq, p, &rf);
3266 return err;
3267 }
3268
3269 /*
3270 * Restrict the CPU affinity of task @p so that it is a subset of
3271 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3272 * old affinity mask. If the resulting mask is empty, we warn and walk
3273 * up the cpuset hierarchy until we find a suitable mask.
3274 */
3275 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3276 {
3277 cpumask_var_t new_mask;
3278 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3279
3280 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3281
3282 /*
3283 * __migrate_task() can fail silently in the face of concurrent
3284 * offlining of the chosen destination CPU, so take the hotplug
3285 * lock to ensure that the migration succeeds.
3286 */
3287 cpus_read_lock();
3288 if (!cpumask_available(new_mask))
3289 goto out_set_mask;
3290
3291 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3292 goto out_free_mask;
3293
3294 /*
3295 * We failed to find a valid subset of the affinity mask for the
3296 * task, so override it based on its cpuset hierarchy.
3297 */
3298 cpuset_cpus_allowed(p, new_mask);
3299 override_mask = new_mask;
3300
3301 out_set_mask:
3302 if (printk_ratelimit()) {
3303 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3304 task_pid_nr(p), p->comm,
3305 cpumask_pr_args(override_mask));
3306 }
3307
3308 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3309 out_free_mask:
3310 cpus_read_unlock();
3311 free_cpumask_var(new_mask);
3312 }
3313
3314 static int
3315 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
3316
3317 /*
3318 * Restore the affinity of a task @p which was previously restricted by a
3319 * call to force_compatible_cpus_allowed_ptr().
3320 *
3321 * It is the caller's responsibility to serialise this with any calls to
3322 * force_compatible_cpus_allowed_ptr(@p).
3323 */
3324 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3325 {
3326 struct affinity_context ac = {
3327 .new_mask = task_user_cpus(p),
3328 .flags = 0,
3329 };
3330 int ret;
3331
3332 /*
3333 * Try to restore the old affinity mask with __sched_setaffinity().
3334 * Cpuset masking will be done there too.
3335 */
3336 ret = __sched_setaffinity(p, &ac);
3337 WARN_ON_ONCE(ret);
3338 }
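/*
 * The two helpers above are intended to be used as a pair around a window in
 * which @p can only run on a restricted set of CPUs; an illustrative sketch
 * (the real call sites are architecture-specific):
 *
 *	force_compatible_cpus_allowed_ptr(p);	// shrink to task_cpu_possible_mask()
 *	...					// run with the restricted affinity
 *	relax_compatible_cpus_allowed_ptr(p);	// restore the user's mask
 *
 * The caller must serialise the two calls itself, as noted above.
 */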
3339
3340 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3341 {
3342 #ifdef CONFIG_SCHED_DEBUG
3343 unsigned int state = READ_ONCE(p->__state);
3344
3345 /*
3346 * We should never call set_task_cpu() on a blocked task,
3347 * ttwu() will sort out the placement.
3348 */
3349 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3350
3351 /*
3352 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3353 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3354 * time relying on p->on_rq.
3355 */
3356 WARN_ON_ONCE(state == TASK_RUNNING &&
3357 p->sched_class == &fair_sched_class &&
3358 (p->on_rq && !task_on_rq_migrating(p)));
3359
3360 #ifdef CONFIG_LOCKDEP
3361 /*
3362 * The caller should hold either p->pi_lock or rq->lock, when changing
3363 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3364 *
3365 * sched_move_task() holds both and thus holding either pins the cgroup,
3366 * see task_group().
3367 *
3368 * Furthermore, all task_rq users should acquire both locks, see
3369 * task_rq_lock().
3370 */
3371 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3372 lockdep_is_held(__rq_lockp(task_rq(p)))));
3373 #endif
3374 /*
3375 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3376 */
3377 WARN_ON_ONCE(!cpu_online(new_cpu));
3378
3379 WARN_ON_ONCE(is_migration_disabled(p));
3380 #endif
3381
3382 trace_sched_migrate_task(p, new_cpu);
3383
3384 if (task_cpu(p) != new_cpu) {
3385 if (p->sched_class->migrate_task_rq)
3386 p->sched_class->migrate_task_rq(p, new_cpu);
3387 p->se.nr_migrations++;
3388 rseq_migrate(p);
3389 sched_mm_cid_migrate_from(p);
3390 perf_event_task_migrate(p);
3391 }
3392
3393 __set_task_cpu(p, new_cpu);
3394 }
3395
3396 #ifdef CONFIG_NUMA_BALANCING
3397 static void __migrate_swap_task(struct task_struct *p, int cpu)
3398 {
3399 if (task_on_rq_queued(p)) {
3400 struct rq *src_rq, *dst_rq;
3401 struct rq_flags srf, drf;
3402
3403 src_rq = task_rq(p);
3404 dst_rq = cpu_rq(cpu);
3405
3406 rq_pin_lock(src_rq, &srf);
3407 rq_pin_lock(dst_rq, &drf);
3408
3409 deactivate_task(src_rq, p, 0);
3410 set_task_cpu(p, cpu);
3411 activate_task(dst_rq, p, 0);
3412 wakeup_preempt(dst_rq, p, 0);
3413
3414 rq_unpin_lock(dst_rq, &drf);
3415 rq_unpin_lock(src_rq, &srf);
3416
3417 } else {
3418 /*
3419 * Task isn't running anymore; make it appear like we migrated
3420 * it before it went to sleep. This means on wakeup we make the
3421 * previous CPU our target instead of where it really is.
3422 */
3423 p->wake_cpu = cpu;
3424 }
3425 }
3426
3427 struct migration_swap_arg {
3428 struct task_struct *src_task, *dst_task;
3429 int src_cpu, dst_cpu;
3430 };
3431
3432 static int migrate_swap_stop(void *data)
3433 {
3434 struct migration_swap_arg *arg = data;
3435 struct rq *src_rq, *dst_rq;
3436
3437 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3438 return -EAGAIN;
3439
3440 src_rq = cpu_rq(arg->src_cpu);
3441 dst_rq = cpu_rq(arg->dst_cpu);
3442
3443 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3444 guard(double_rq_lock)(src_rq, dst_rq);
3445
3446 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3447 return -EAGAIN;
3448
3449 if (task_cpu(arg->src_task) != arg->src_cpu)
3450 return -EAGAIN;
3451
3452 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3453 return -EAGAIN;
3454
3455 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3456 return -EAGAIN;
3457
3458 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3459 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3460
3461 return 0;
3462 }
3463
3464 /*
3465 * Cross migrate two tasks
3466 */
3467 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3468 int target_cpu, int curr_cpu)
3469 {
3470 struct migration_swap_arg arg;
3471 int ret = -EINVAL;
3472
3473 arg = (struct migration_swap_arg){
3474 .src_task = cur,
3475 .src_cpu = curr_cpu,
3476 .dst_task = p,
3477 .dst_cpu = target_cpu,
3478 };
3479
3480 if (arg.src_cpu == arg.dst_cpu)
3481 goto out;
3482
3483 /*
3484 * These three tests are all lockless; this is OK since all of them
3485 * will be re-checked with proper locks held further down the line.
3486 */
3487 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3488 goto out;
3489
3490 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3491 goto out;
3492
3493 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3494 goto out;
3495
3496 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3497 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3498
3499 out:
3500 return ret;
3501 }
3502 #endif /* CONFIG_NUMA_BALANCING */
3503
3504 /***
3505 * kick_process - kick a running thread to enter/exit the kernel
3506 * @p: the to-be-kicked thread
3507 *
3508 * Cause a process which is running on another CPU to enter
3509 * kernel-mode, without any delay. (to get signals handled.)
3510 *
3511 * NOTE: this function doesn't have to take the runqueue lock,
3512 * because all it wants to ensure is that the remote task enters
3513 * the kernel. If the IPI races and the task has been migrated
3514 * to another CPU then no harm is done and the purpose has been
3515 * achieved as well.
3516 */
3517 void kick_process(struct task_struct *p)
3518 {
3519 int cpu;
3520
3521 preempt_disable();
3522 cpu = task_cpu(p);
3523 if ((cpu != smp_processor_id()) && task_curr(p))
3524 smp_send_reschedule(cpu);
3525 preempt_enable();
3526 }
3527 EXPORT_SYMBOL_GPL(kick_process);
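/*
 * Usage sketch (illustrative): after setting a per-task flag that must be
 * noticed promptly, kick the CPU the task is running on so it re-enters the
 * kernel and observes the flag:
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	kick_process(p);
 *
 * which is essentially what the signal delivery path does when it cannot
 * wake the task.
 */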
3528
3529 /*
3530 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3531 *
3532 * A few notes on cpu_active vs cpu_online:
3533 *
3534 * - cpu_active must be a subset of cpu_online
3535 *
3536 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3537 * see __set_cpus_allowed_ptr(). At this point the newly online
3538 * CPU isn't yet part of the sched domains, and balancing will not
3539 * see it.
3540 *
3541 * - on CPU-down we clear cpu_active() to mask the sched domains and
3542 * prevent the load balancer from placing new tasks on the
3543 * to-be-removed CPU. Existing tasks will remain running there and
3544 * will be taken off.
3545 *
3546 * This means that fallback selection must not select !active CPUs.
3547 * And can assume that any active CPU must be online. Conversely
3548 * select_task_rq() below may allow selection of !active CPUs in order
3549 * to satisfy the above rules.
3550 */
3551 static int select_fallback_rq(int cpu, struct task_struct *p)
3552 {
3553 int nid = cpu_to_node(cpu);
3554 const struct cpumask *nodemask = NULL;
3555 enum { cpuset, possible, fail } state = cpuset;
3556 int dest_cpu;
3557
3558 /*
3559 * If the node that the CPU is on has been offlined, cpu_to_node()
3560 * will return -1. There is no CPU on the node, and we should
3561 * select the CPU on the other node.
3562 */
3563 if (nid != -1) {
3564 nodemask = cpumask_of_node(nid);
3565
3566 /* Look for allowed, online CPU in same node. */
3567 for_each_cpu(dest_cpu, nodemask) {
3568 if (is_cpu_allowed(p, dest_cpu))
3569 return dest_cpu;
3570 }
3571 }
3572
3573 for (;;) {
3574 /* Any allowed, online CPU? */
3575 for_each_cpu(dest_cpu, p->cpus_ptr) {
3576 if (!is_cpu_allowed(p, dest_cpu))
3577 continue;
3578
3579 goto out;
3580 }
3581
3582 /* No more Mr. Nice Guy. */
3583 switch (state) {
3584 case cpuset:
3585 if (cpuset_cpus_allowed_fallback(p)) {
3586 state = possible;
3587 break;
3588 }
3589 fallthrough;
3590 case possible:
3591 /*
3592 * XXX When called from select_task_rq() we only
3593 * hold p->pi_lock and again violate locking order.
3594 *
3595 * More yuck to audit.
3596 */
3597 do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3598 state = fail;
3599 break;
3600 case fail:
3601 BUG();
3602 break;
3603 }
3604 }
3605
3606 out:
3607 if (state != cpuset) {
3608 /*
3609 * Don't tell them about moving exiting tasks or
3610 * kernel threads (both mm NULL), since they never
3611 * leave kernel.
3612 */
3613 if (p->mm && printk_ratelimit()) {
3614 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3615 task_pid_nr(p), p->comm, cpu);
3616 }
3617 }
3618
3619 return dest_cpu;
3620 }
3621
3622 /*
3623 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3624 */
3625 static inline
3626 int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3627 {
3628 lockdep_assert_held(&p->pi_lock);
3629
3630 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3631 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3632 else
3633 cpu = cpumask_any(p->cpus_ptr);
3634
3635 /*
3636 * In order not to call set_task_cpu() on a blocking task we need
3637 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3638 * CPU.
3639 *
3640 * Since this is common to all placement strategies, this lives here.
3641 *
3642 * [ this allows ->select_task() to simply return task_cpu(p) and
3643 * not worry about this generic constraint ]
3644 */
3645 if (unlikely(!is_cpu_allowed(p, cpu)))
3646 cpu = select_fallback_rq(task_cpu(p), p);
3647
3648 return cpu;
3649 }
3650
3651 void sched_set_stop_task(int cpu, struct task_struct *stop)
3652 {
3653 static struct lock_class_key stop_pi_lock;
3654 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3655 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3656
3657 if (stop) {
3658 /*
3659 * Make it appear like a SCHED_FIFO task, it's something
3660 * userspace knows about and won't get confused about.
3661 *
3662 * Also, it will make PI more or less work without too
3663 * much confusion -- but then, stop work should not
3664 * rely on PI working anyway.
3665 */
3666 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3667
3668 stop->sched_class = &stop_sched_class;
3669
3670 /*
3671 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3672 * adjust the effective priority of a task. As a result,
3673 * rt_mutex_setprio() can trigger (RT) balancing operations,
3674 * which can then trigger wakeups of the stop thread to push
3675 * around the current task.
3676 *
3677 * The stop task itself will never be part of the PI-chain, it
3678 * never blocks, therefore that ->pi_lock recursion is safe.
3679 * Tell lockdep about this by placing the stop->pi_lock in its
3680 * own class.
3681 */
3682 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3683 }
3684
3685 cpu_rq(cpu)->stop = stop;
3686
3687 if (old_stop) {
3688 /*
3689 * Reset it back to a normal scheduling class so that
3690 * it can die in pieces.
3691 */
3692 old_stop->sched_class = &rt_sched_class;
3693 }
3694 }
3695
3696 #else /* CONFIG_SMP */
3697
3698 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
3699 struct affinity_context *ctx)
3700 {
3701 return set_cpus_allowed_ptr(p, ctx->new_mask);
3702 }
3703
3704 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3705
3706 static inline bool rq_has_pinned_tasks(struct rq *rq)
3707 {
3708 return false;
3709 }
3710
3711 static inline cpumask_t *alloc_user_cpus_ptr(int node)
3712 {
3713 return NULL;
3714 }
3715
3716 #endif /* !CONFIG_SMP */
3717
3718 static void
3719 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3720 {
3721 struct rq *rq;
3722
3723 if (!schedstat_enabled())
3724 return;
3725
3726 rq = this_rq();
3727
3728 #ifdef CONFIG_SMP
3729 if (cpu == rq->cpu) {
3730 __schedstat_inc(rq->ttwu_local);
3731 __schedstat_inc(p->stats.nr_wakeups_local);
3732 } else {
3733 struct sched_domain *sd;
3734
3735 __schedstat_inc(p->stats.nr_wakeups_remote);
3736
3737 guard(rcu)();
3738 for_each_domain(rq->cpu, sd) {
3739 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3740 __schedstat_inc(sd->ttwu_wake_remote);
3741 break;
3742 }
3743 }
3744 }
3745
3746 if (wake_flags & WF_MIGRATED)
3747 __schedstat_inc(p->stats.nr_wakeups_migrate);
3748 #endif /* CONFIG_SMP */
3749
3750 __schedstat_inc(rq->ttwu_count);
3751 __schedstat_inc(p->stats.nr_wakeups);
3752
3753 if (wake_flags & WF_SYNC)
3754 __schedstat_inc(p->stats.nr_wakeups_sync);
3755 }
3756
3757 /*
3758 * Mark the task runnable.
3759 */
3760 static inline void ttwu_do_wakeup(struct task_struct *p)
3761 {
3762 WRITE_ONCE(p->__state, TASK_RUNNING);
3763 trace_sched_wakeup(p);
3764 }
3765
3766 static void
3767 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3768 struct rq_flags *rf)
3769 {
3770 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3771
3772 lockdep_assert_rq_held(rq);
3773
3774 if (p->sched_contributes_to_load)
3775 rq->nr_uninterruptible--;
3776
3777 #ifdef CONFIG_SMP
3778 if (wake_flags & WF_MIGRATED)
3779 en_flags |= ENQUEUE_MIGRATED;
3780 else
3781 #endif
3782 if (p->in_iowait) {
3783 delayacct_blkio_end(p);
3784 atomic_dec(&task_rq(p)->nr_iowait);
3785 }
3786
3787 activate_task(rq, p, en_flags);
3788 wakeup_preempt(rq, p, wake_flags);
3789
3790 ttwu_do_wakeup(p);
3791
3792 #ifdef CONFIG_SMP
3793 if (p->sched_class->task_woken) {
3794 /*
3795 * Our task @p is fully woken up and running; so it's safe to
3796 * drop the rq->lock, hereafter rq is only used for statistics.
3797 */
3798 rq_unpin_lock(rq, rf);
3799 p->sched_class->task_woken(rq, p);
3800 rq_repin_lock(rq, rf);
3801 }
3802
3803 if (rq->idle_stamp) {
3804 u64 delta = rq_clock(rq) - rq->idle_stamp;
3805 u64 max = 2*rq->max_idle_balance_cost;
3806
3807 update_avg(&rq->avg_idle, delta);
3808
3809 if (rq->avg_idle > max)
3810 rq->avg_idle = max;
3811
3812 rq->wake_stamp = jiffies;
3813 rq->wake_avg_idle = rq->avg_idle / 2;
3814
3815 rq->idle_stamp = 0;
3816 }
3817 #endif
3818 }
3819
3820 /*
3821 * Consider @p being inside a wait loop:
3822 *
3823 * for (;;) {
3824 * set_current_state(TASK_UNINTERRUPTIBLE);
3825 *
3826 * if (CONDITION)
3827 * break;
3828 *
3829 * schedule();
3830 * }
3831 * __set_current_state(TASK_RUNNING);
3832 *
3833 * between set_current_state() and schedule(). In this case @p is still
3834 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3835 * an atomic manner.
3836 *
3837 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3838 * then schedule() must still happen and p->state can be changed to
3839 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3840 * need to do a full wakeup with enqueue.
3841 *
3842 * Returns: %true when the wakeup is done,
3843 * %false otherwise.
3844 */
3845 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3846 {
3847 struct rq_flags rf;
3848 struct rq *rq;
3849 int ret = 0;
3850
3851 rq = __task_rq_lock(p, &rf);
3852 if (task_on_rq_queued(p)) {
3853 if (!task_on_cpu(rq, p)) {
3854 /*
3855 * When on_rq && !on_cpu the task is preempted, see if
3856 * it should preempt the task that is current now.
3857 */
3858 update_rq_clock(rq);
3859 wakeup_preempt(rq, p, wake_flags);
3860 }
3861 ttwu_do_wakeup(p);
3862 ret = 1;
3863 }
3864 __task_rq_unlock(rq, &rf);
3865
3866 return ret;
3867 }
3868
3869 #ifdef CONFIG_SMP
3870 void sched_ttwu_pending(void *arg)
3871 {
3872 struct llist_node *llist = arg;
3873 struct rq *rq = this_rq();
3874 struct task_struct *p, *t;
3875 struct rq_flags rf;
3876
3877 if (!llist)
3878 return;
3879
3880 rq_lock_irqsave(rq, &rf);
3881 update_rq_clock(rq);
3882
3883 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3884 if (WARN_ON_ONCE(p->on_cpu))
3885 smp_cond_load_acquire(&p->on_cpu, !VAL);
3886
3887 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3888 set_task_cpu(p, cpu_of(rq));
3889
3890 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3891 }
3892
3893 /*
3894 * Must be after enqueueing at least one task such that
3895 * idle_cpu() does not observe a false-negative -- if it does,
3896 * it is possible for select_idle_siblings() to stack a number
3897 * of tasks on this CPU during that window.
3898 *
3899 * It is OK to clear ttwu_pending when another task is pending.
3900 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3901 * Since nr_running is now > 0, idle_cpu() will always return the correct result.
3902 */
3903 WRITE_ONCE(rq->ttwu_pending, 0);
3904 rq_unlock_irqrestore(rq, &rf);
3905 }
3906
3907 /*
3908 * Prepare the scene for sending an IPI for a remote smp_call
3909 *
3910 * Returns true if the caller can proceed with sending the IPI.
3911 * Returns false otherwise.
3912 */
3913 bool call_function_single_prep_ipi(int cpu)
3914 {
3915 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3916 trace_sched_wake_idle_without_ipi(cpu);
3917 return false;
3918 }
3919
3920 return true;
3921 }
3922
3923 /*
3924 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3925 * necessary. The wakee CPU, on receipt of the IPI, will queue the task
3926 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3927 * of the wakeup instead of the waker.
3928 */
3929 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3930 {
3931 struct rq *rq = cpu_rq(cpu);
3932
3933 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3934
3935 WRITE_ONCE(rq->ttwu_pending, 1);
3936 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3937 }
3938
3939 void wake_up_if_idle(int cpu)
3940 {
3941 struct rq *rq = cpu_rq(cpu);
3942
3943 guard(rcu)();
3944 if (is_idle_task(rcu_dereference(rq->curr))) {
3945 guard(rq_lock_irqsave)(rq);
3946 if (is_idle_task(rq->curr))
3947 resched_curr(rq);
3948 }
3949 }
3950
3951 bool cpus_share_cache(int this_cpu, int that_cpu)
3952 {
3953 if (this_cpu == that_cpu)
3954 return true;
3955
3956 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3957 }
3958
3959 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3960 {
3961 /*
3962 * Do not complicate things with the async wake_list while the CPU is
3963 * in hotplug state.
3964 */
3965 if (!cpu_active(cpu))
3966 return false;
3967
3968 /* Ensure the task will still be allowed to run on the CPU. */
3969 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3970 return false;
3971
3972 /*
3973 * If the CPU does not share cache, then queue the task on the
3974 * remote rqs wakelist to avoid accessing remote data.
3975 */
3976 if (!cpus_share_cache(smp_processor_id(), cpu))
3977 return true;
3978
3979 if (cpu == smp_processor_id())
3980 return false;
3981
3982 /*
3983 * If the wakee cpu is idle, or the task is descheduling and the
3984 * only running task on the CPU, then use the wakelist to offload
3985 * the task activation to the idle (or soon-to-be-idle) CPU as
3986 * the current CPU is likely busy. nr_running is checked to
3987 * avoid unnecessary task stacking.
3988 *
3989 * Note that we can only get here with (wakee) p->on_rq=0,
3990 * p->on_cpu can be whatever, we've done the dequeue, so
3991 * the wakee has been accounted out of ->nr_running.
3992 */
3993 if (!cpu_rq(cpu)->nr_running)
3994 return true;
3995
3996 return false;
3997 }
3998
3999 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
4000 {
4001 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
4002 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
4003 __ttwu_queue_wakelist(p, cpu, wake_flags);
4004 return true;
4005 }
4006
4007 return false;
4008 }
4009
4010 #else /* !CONFIG_SMP */
4011
4012 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
4013 {
4014 return false;
4015 }
4016
4017 #endif /* CONFIG_SMP */
4018
4019 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
4020 {
4021 struct rq *rq = cpu_rq(cpu);
4022 struct rq_flags rf;
4023
4024 if (ttwu_queue_wakelist(p, cpu, wake_flags))
4025 return;
4026
4027 rq_lock(rq, &rf);
4028 update_rq_clock(rq);
4029 ttwu_do_activate(rq, p, wake_flags, &rf);
4030 rq_unlock(rq, &rf);
4031 }
4032
4033 /*
4034 * Invoked from try_to_wake_up() to check whether the task can be woken up.
4035 *
4036 * The caller holds p::pi_lock if p != current or has preemption
4037 * disabled when p == current.
4038 *
4039 * The rules of PREEMPT_RT saved_state:
4040 *
4041 * The related locking code always holds p::pi_lock when updating
4042 * p::saved_state, which means the code is fully serialized in both cases.
4043 *
4044 * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
4045 * bits are set. This allows us to distinguish all wakeup scenarios.
4046 */
4047 static __always_inline
4048 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4049 {
4050 int match;
4051
4052 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4053 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4054 state != TASK_RTLOCK_WAIT);
4055 }
4056
4057 *success = !!(match = __task_state_match(p, state));
4058
4059 #ifdef CONFIG_PREEMPT_RT
4060 /*
4061 * Saved state preserves the task state across blocking on
4062 * an RT lock. If the state matches, set p::saved_state to
4063 * TASK_RUNNING, but do not wake the task because it waits
4064 * for a lock wakeup. Also indicate success because from
4065 * the regular waker's point of view this has succeeded.
4066 *
4067 * After acquiring the lock the task will restore p::__state
4068 * from p::saved_state which ensures that the regular
4069 * wakeup is not lost. The restore will also set
4070 * p::saved_state to TASK_RUNNING so any further tests will
4071 * not result in false positives vs. @success
4072 */
4073 if (match < 0)
4074 p->saved_state = TASK_RUNNING;
4075 #endif
4076 return match > 0;
4077 }
4078
4079 /*
4080 * Notes on Program-Order guarantees on SMP systems.
4081 *
4082 * MIGRATION
4083 *
4084 * The basic program-order guarantee on SMP systems is that when a task [t]
4085 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4086 * execution on its new CPU [c1].
4087 *
4088 * For migration (of runnable tasks) this is provided by the following means:
4089 *
4090 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4091 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4092 * rq(c1)->lock (if not at the same time, then in that order).
4093 * C) LOCK of the rq(c1)->lock scheduling in task
4094 *
4095 * Release/acquire chaining guarantees that B happens after A and C after B.
4096 * Note: the CPU doing B need not be c0 or c1
4097 *
4098 * Example:
4099 *
4100 * CPU0 CPU1 CPU2
4101 *
4102 * LOCK rq(0)->lock
4103 * sched-out X
4104 * sched-in Y
4105 * UNLOCK rq(0)->lock
4106 *
4107 * LOCK rq(0)->lock // orders against CPU0
4108 * dequeue X
4109 * UNLOCK rq(0)->lock
4110 *
4111 * LOCK rq(1)->lock
4112 * enqueue X
4113 * UNLOCK rq(1)->lock
4114 *
4115 * LOCK rq(1)->lock // orders against CPU2
4116 * sched-out Z
4117 * sched-in X
4118 * UNLOCK rq(1)->lock
4119 *
4120 *
4121 * BLOCKING -- aka. SLEEP + WAKEUP
4122 *
4123 * For blocking we (obviously) need to provide the same guarantee as for
4124 * migration. However the means are completely different as there is no lock
4125 * chain to provide order. Instead we do:
4126 *
4127 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4128 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4129 *
4130 * Example:
4131 *
4132 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4133 *
4134 * LOCK rq(0)->lock LOCK X->pi_lock
4135 * dequeue X
4136 * sched-out X
4137 * smp_store_release(X->on_cpu, 0);
4138 *
4139 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4140 * X->state = WAKING
4141 * set_task_cpu(X,2)
4142 *
4143 * LOCK rq(2)->lock
4144 * enqueue X
4145 * X->state = RUNNING
4146 * UNLOCK rq(2)->lock
4147 *
4148 * LOCK rq(2)->lock // orders against CPU1
4149 * sched-out Z
4150 * sched-in X
4151 * UNLOCK rq(2)->lock
4152 *
4153 * UNLOCK X->pi_lock
4154 * UNLOCK rq(0)->lock
4155 *
4156 *
4157 * However, for wakeups there is a second guarantee we must provide, namely we
4158 * must ensure that CONDITION=1 done by the caller can not be reordered with
4159 * accesses to the task state; see try_to_wake_up() and set_current_state().
4160 */
4161
4162 /**
4163 * try_to_wake_up - wake up a thread
4164 * @p: the thread to be awakened
4165 * @state: the mask of task states that can be woken
4166 * @wake_flags: wake modifier flags (WF_*)
4167 *
4168 * Conceptually does:
4169 *
4170 * If (@state & @p->state) @p->state = TASK_RUNNING.
4171 *
4172 * If the task was not queued/runnable, also place it back on a runqueue.
4173 *
4174 * This function is atomic against schedule() which would dequeue the task.
4175 *
4176 * It issues a full memory barrier before accessing @p->state, see the comment
4177 * with set_current_state().
4178 *
4179 * Uses p->pi_lock to serialize against concurrent wake-ups.
4180 *
4181 * Relies on p->pi_lock stabilizing:
4182 * - p->sched_class
4183 * - p->cpus_ptr
4184 * - p->sched_task_group
4185 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4186 *
4187 * Tries really hard to only take one task_rq(p)->lock for performance.
4188 * Takes rq->lock in:
4189 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4190 * - ttwu_queue() -- new rq, for enqueue of the task;
4191 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4192 *
4193 * As a consequence we race really badly with just about everything. See the
4194 * many memory barriers and their comments for details.
4195 *
4196 * Return: %true if @p->state changes (an actual wakeup was done),
4197 * %false otherwise.
4198 */
4199 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4200 {
4201 guard(preempt)();
4202 int cpu, success = 0;
4203
4204 if (p == current) {
4205 /*
4206 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4207 * == smp_processor_id()'. Together this means we can special
4208 * case the whole 'p->on_rq && ttwu_runnable()' case below
4209 * without taking any locks.
4210 *
4211 * In particular:
4212 * - we rely on Program-Order guarantees for all the ordering,
4213 * - we're serialized against set_special_state() by virtue of
4214 * it disabling IRQs (this allows not taking ->pi_lock).
4215 */
4216 if (!ttwu_state_match(p, state, &success))
4217 goto out;
4218
4219 trace_sched_waking(p);
4220 ttwu_do_wakeup(p);
4221 goto out;
4222 }
4223
4224 /*
4225 * If we are going to wake up a thread waiting for CONDITION we
4226 * need to ensure that CONDITION=1 done by the caller can not be
4227 * reordered with p->state check below. This pairs with smp_store_mb()
4228 * in set_current_state() that the waiting thread does.
4229 */
4230 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4231 smp_mb__after_spinlock();
4232 if (!ttwu_state_match(p, state, &success))
4233 break;
4234
4235 trace_sched_waking(p);
4236
4237 /*
4238 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4239 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4240 * in smp_cond_load_acquire() below.
4241 *
4242 * sched_ttwu_pending() try_to_wake_up()
4243 * STORE p->on_rq = 1 LOAD p->state
4244 * UNLOCK rq->lock
4245 *
4246 * __schedule() (switch to task 'p')
4247 * LOCK rq->lock smp_rmb();
4248 * smp_mb__after_spinlock();
4249 * UNLOCK rq->lock
4250 *
4251 * [task p]
4252 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4253 *
4254 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4255 * __schedule(). See the comment for smp_mb__after_spinlock().
4256 *
4257 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
4258 */
4259 smp_rmb();
4260 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4261 break;
4262
4263 #ifdef CONFIG_SMP
4264 /*
4265 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4266 * possible to, falsely, observe p->on_cpu == 0.
4267 *
4268 * One must be running (->on_cpu == 1) in order to remove oneself
4269 * from the runqueue.
4270 *
4271 * __schedule() (switch to task 'p') try_to_wake_up()
4272 * STORE p->on_cpu = 1 LOAD p->on_rq
4273 * UNLOCK rq->lock
4274 *
4275 * __schedule() (put 'p' to sleep)
4276 * LOCK rq->lock smp_rmb();
4277 * smp_mb__after_spinlock();
4278 * STORE p->on_rq = 0 LOAD p->on_cpu
4279 *
4280 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4281 * __schedule(). See the comment for smp_mb__after_spinlock().
4282 *
4283 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4284 * schedule()'s deactivate_task() has 'happened' and p will no longer
4285 * care about its own p->state. See the comment in __schedule().
4286 */
4287 smp_acquire__after_ctrl_dep();
4288
4289 /*
4290 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4291 * == 0), which means we need to do an enqueue, change p->state to
4292 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4293 * enqueue, such as ttwu_queue_wakelist().
4294 */
4295 WRITE_ONCE(p->__state, TASK_WAKING);
4296
4297 /*
4298 * If the owning (remote) CPU is still in the middle of schedule() with
4299 * this task as prev, consider queueing p on the remote CPU's wake_list
4300 * which potentially sends an IPI instead of spinning on p->on_cpu to
4301 * let the waker make forward progress. This is safe because IRQs are
4302 * disabled and the IPI will deliver after on_cpu is cleared.
4303 *
4304 * Ensure we load task_cpu(p) after p->on_cpu:
4305 *
4306 * set_task_cpu(p, cpu);
4307 * STORE p->cpu = @cpu
4308 * __schedule() (switch to task 'p')
4309 * LOCK rq->lock
4310 * smp_mb__after_spinlock() smp_cond_load_acquire(&p->on_cpu)
4311 * STORE p->on_cpu = 1 LOAD p->cpu
4312 *
4313 * to ensure we observe the correct CPU on which the task is currently
4314 * scheduling.
4315 */
4316 if (smp_load_acquire(&p->on_cpu) &&
4317 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4318 break;
4319
4320 /*
4321 * If the owning (remote) CPU is still in the middle of schedule() with
4322 * this task as prev, wait until it's done referencing the task.
4323 *
4324 * Pairs with the smp_store_release() in finish_task().
4325 *
4326 * This ensures that tasks getting woken will be fully ordered against
4327 * their previous state and preserve Program Order.
4328 */
4329 smp_cond_load_acquire(&p->on_cpu, !VAL);
4330
4331 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4332 if (task_cpu(p) != cpu) {
4333 if (p->in_iowait) {
4334 delayacct_blkio_end(p);
4335 atomic_dec(&task_rq(p)->nr_iowait);
4336 }
4337
4338 wake_flags |= WF_MIGRATED;
4339 psi_ttwu_dequeue(p);
4340 set_task_cpu(p, cpu);
4341 }
4342 #else
4343 cpu = task_cpu(p);
4344 #endif /* CONFIG_SMP */
4345
4346 ttwu_queue(p, cpu, wake_flags);
4347 }
4348 out:
4349 if (success)
4350 ttwu_stat(p, task_cpu(p), wake_flags);
4351
4352 return success;
4353 }
4354
4355 static bool __task_needs_rq_lock(struct task_struct *p)
4356 {
4357 unsigned int state = READ_ONCE(p->__state);
4358
4359 /*
4360 * Since p->pi_lock blocks try_to_wake_up(), we don't need rq->lock when
4361 * the task is blocked. Make sure to check @state since ttwu() can drop
4362 * locks at the end, see ttwu_queue_wakelist().
4363 */
4364 if (state == TASK_RUNNING || state == TASK_WAKING)
4365 return true;
4366
4367 /*
4368 * Ensure we load p->on_rq after p->__state, otherwise it would be
4369 * possible to, falsely, observe p->on_rq == 0.
4370 *
4371 * See try_to_wake_up() for a longer comment.
4372 */
4373 smp_rmb();
4374 if (p->on_rq)
4375 return true;
4376
4377 #ifdef CONFIG_SMP
4378 /*
4379 * Ensure the task has finished __schedule() and will not be referenced
4380 * anymore. Again, see try_to_wake_up() for a longer comment.
4381 */
4382 smp_rmb();
4383 smp_cond_load_acquire(&p->on_cpu, !VAL);
4384 #endif
4385
4386 return false;
4387 }
4388
4389 /**
4390 * task_call_func - Invoke a function on task in fixed state
4391 * @p: Process for which the function is to be invoked, can be @current.
4392 * @func: Function to invoke.
4393 * @arg: Argument to function.
4394 *
4395 * Fix the task in its current state by avoiding wakeups and/or rq operations
4396 * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4397 * to work out what the state is, if required. Given that @func can be invoked
4398 * with a runqueue lock held, it had better be quite lightweight.
4399 *
4400 * Returns:
4401 * Whatever @func returns
4402 */
4403 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4404 {
4405 struct rq *rq = NULL;
4406 struct rq_flags rf;
4407 int ret;
4408
4409 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4410
4411 if (__task_needs_rq_lock(p))
4412 rq = __task_rq_lock(p, &rf);
4413
4414 /*
4415 * At this point the task is pinned; either:
4416 * - blocked and we're holding off wakeups (pi->lock)
4417 * - woken, and we're holding off enqueue (rq->lock)
4418 * - queued, and we're holding off schedule (rq->lock)
4419 * - running, and we're holding off de-schedule (rq->lock)
4420 *
4421 * The called function (@func) can use: task_curr(), p->on_rq and
4422 * p->__state to differentiate between these states.
4423 */
4424 ret = func(p, arg);
4425
4426 if (rq)
4427 rq_unlock(rq, &rf);
4428
4429 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4430 return ret;
4431 }
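/*
 * Illustrative callback (hypothetical, not from this file): because the task
 * is pinned as described above, @func can safely sample scheduler state that
 * would otherwise be racy, e.g.:
 *
 *        static int get_state_cb(struct task_struct *p, void *arg)
 *        {
 *                *(unsigned int *)arg = READ_ONCE(p->__state);
 *                return p->on_rq;
 *        }
 *
 *        unsigned int state;
 *        int queued = task_call_func(p, get_state_cb, &state);
 */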
4432
4433 /**
4434 * cpu_curr_snapshot - Return a snapshot of the currently running task
4435 * @cpu: The CPU on which to snapshot the task.
4436 *
4437 * Returns the task_struct pointer of the task "currently" running on
4438 * the specified CPU.
4439 *
4440 * If the specified CPU was offline, the return value is whatever it
4441 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4442 * task, but there is no guarantee. Callers wishing a useful return
4443 * value must take some action to ensure that the specified CPU remains
4444 * online throughout.
4445 *
4446 * This function executes full memory barriers before and after fetching
4447 * the pointer, which permits the caller to confine this function's fetch
4448 * with respect to the caller's accesses to other shared variables.
4449 */
4450 struct task_struct *cpu_curr_snapshot(int cpu)
4451 {
4452 struct rq *rq = cpu_rq(cpu);
4453 struct task_struct *t;
4454 struct rq_flags rf;
4455
4456 rq_lock_irqsave(rq, &rf);
4457 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4458 t = rcu_dereference(cpu_curr(cpu));
4459 rq_unlock_irqrestore(rq, &rf);
4460 smp_mb(); /* Pairing determined by caller's synchronization design. */
4461
4462 return t;
4463 }
4464
4465 /**
4466 * wake_up_process - Wake up a specific process
4467 * @p: The process to be woken up.
4468 *
4469 * Attempt to wake up the nominated process and move it to the set of runnable
4470 * processes.
4471 *
4472 * Return: 1 if the process was woken up, 0 if it was already running.
4473 *
4474 * This function executes a full memory barrier before accessing the task state.
4475 */
4476 int wake_up_process(struct task_struct *p)
4477 {
4478 return try_to_wake_up(p, TASK_NORMAL, 0);
4479 }
4480 EXPORT_SYMBOL(wake_up_process);
4481
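/*
 * Illustrative waker side (not part of this file) for the wait loop shown
 * above ttwu_runnable(): the waker must publish the condition before the
 * wakeup so that it cannot be reordered past the sleeper's state check:
 *
 *        CONDITION = 1;
 *        wake_up_process(sleeper);  // full barrier before the ->__state access
 */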
4482 int wake_up_state(struct task_struct *p, unsigned int state)
4483 {
4484 return try_to_wake_up(p, state, 0);
4485 }
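/*
 * Illustration: wake_up_state(p, TASK_INTERRUPTIBLE) wakes @p only from an
 * interruptible sleep, whereas wake_up_process() passes TASK_NORMAL
 * (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) and therefore also wakes
 * uninterruptible sleepers.
 */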
4486
4487 /*
4488 * Perform scheduler related setup for a newly forked process p.
4489 * p is forked by current.
4490 *
4491 * __sched_fork() is basic setup used by init_idle() too:
4492 */
4493 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4494 {
4495 p->on_rq = 0;
4496
4497 p->se.on_rq = 0;
4498 p->se.exec_start = 0;
4499 p->se.sum_exec_runtime = 0;
4500 p->se.prev_sum_exec_runtime = 0;
4501 p->se.nr_migrations = 0;
4502 p->se.vruntime = 0;
4503 p->se.vlag = 0;
4504 p->se.slice = sysctl_sched_base_slice;
4505 INIT_LIST_HEAD(&p->se.group_node);
4506
4507 #ifdef CONFIG_FAIR_GROUP_SCHED
4508 p->se.cfs_rq = NULL;
4509 #endif
4510
4511 #ifdef CONFIG_SCHEDSTATS
4512 /* Even if schedstat is disabled, there should not be garbage */
4513 memset(&p->stats, 0, sizeof(p->stats));
4514 #endif
4515
4516 init_dl_entity(&p->dl);
4517
4518 INIT_LIST_HEAD(&p->rt.run_list);
4519 p->rt.timeout = 0;
4520 p->rt.time_slice = sched_rr_timeslice;
4521 p->rt.on_rq = 0;
4522 p->rt.on_list = 0;
4523
4524 #ifdef CONFIG_PREEMPT_NOTIFIERS
4525 INIT_HLIST_HEAD(&p->preempt_notifiers);
4526 #endif
4527
4528 #ifdef CONFIG_COMPACTION
4529 p->capture_control = NULL;
4530 #endif
4531 init_numa_balancing(clone_flags, p);
4532 #ifdef CONFIG_SMP
4533 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4534 p->migration_pending = NULL;
4535 #endif
4536 init_sched_mm_cid(p);
4537 }
4538
4539 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4540
4541 #ifdef CONFIG_NUMA_BALANCING
4542
4543 int sysctl_numa_balancing_mode;
4544
4545 static void __set_numabalancing_state(bool enabled)
4546 {
4547 if (enabled)
4548 static_branch_enable(&sched_numa_balancing);
4549 else
4550 static_branch_disable(&sched_numa_balancing);
4551 }
4552
4553 void set_numabalancing_state(bool enabled)
4554 {
4555 if (enabled)
4556 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4557 else
4558 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4559 __set_numabalancing_state(enabled);
4560 }
4561
4562 #ifdef CONFIG_PROC_SYSCTL
4563 static void reset_memory_tiering(void)
4564 {
4565 struct pglist_data *pgdat;
4566
4567 for_each_online_pgdat(pgdat) {
4568 pgdat->nbp_threshold = 0;
4569 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4570 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4571 }
4572 }
4573
4574 static int sysctl_numa_balancing(struct ctl_table *table, int write,
4575 void *buffer, size_t *lenp, loff_t *ppos)
4576 {
4577 struct ctl_table t;
4578 int err;
4579 int state = sysctl_numa_balancing_mode;
4580
4581 if (write && !capable(CAP_SYS_ADMIN))
4582 return -EPERM;
4583
4584 t = *table;
4585 t.data = &state;
4586 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4587 if (err < 0)
4588 return err;
4589 if (write) {
4590 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4591 (state & NUMA_BALANCING_MEMORY_TIERING))
4592 reset_memory_tiering();
4593 sysctl_numa_balancing_mode = state;
4594 __set_numabalancing_state(state);
4595 }
4596 return err;
4597 }
4598 #endif
4599 #endif
4600
4601 #ifdef CONFIG_SCHEDSTATS
4602
4603 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4604
4605 static void set_schedstats(bool enabled)
4606 {
4607 if (enabled)
4608 static_branch_enable(&sched_schedstats);
4609 else
4610 static_branch_disable(&sched_schedstats);
4611 }
4612
4613 void force_schedstat_enabled(void)
4614 {
4615 if (!schedstat_enabled()) {
4616 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4617 static_branch_enable(&sched_schedstats);
4618 }
4619 }
4620
4621 static int __init setup_schedstats(char *str)
4622 {
4623 int ret = 0;
4624 if (!str)
4625 goto out;
4626
4627 if (!strcmp(str, "enable")) {
4628 set_schedstats(true);
4629 ret = 1;
4630 } else if (!strcmp(str, "disable")) {
4631 set_schedstats(false);
4632 ret = 1;
4633 }
4634 out:
4635 if (!ret)
4636 pr_warn("Unable to parse schedstats=\n");
4637
4638 return ret;
4639 }
4640 __setup("schedstats=", setup_schedstats);
4641
4642 #ifdef CONFIG_PROC_SYSCTL
4643 static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
4644 size_t *lenp, loff_t *ppos)
4645 {
4646 struct ctl_table t;
4647 int err;
4648 int state = static_branch_likely(&sched_schedstats);
4649
4650 if (write && !capable(CAP_SYS_ADMIN))
4651 return -EPERM;
4652
4653 t = *table;
4654 t.data = &state;
4655 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4656 if (err < 0)
4657 return err;
4658 if (write)
4659 set_schedstats(state);
4660 return err;
4661 }
4662 #endif /* CONFIG_PROC_SYSCTL */
4663 #endif /* CONFIG_SCHEDSTATS */
4664
4665 #ifdef CONFIG_SYSCTL
4666 static struct ctl_table sched_core_sysctls[] = {
4667 #ifdef CONFIG_SCHEDSTATS
4668 {
4669 .procname = "sched_schedstats",
4670 .data = NULL,
4671 .maxlen = sizeof(unsigned int),
4672 .mode = 0644,
4673 .proc_handler = sysctl_schedstats,
4674 .extra1 = SYSCTL_ZERO,
4675 .extra2 = SYSCTL_ONE,
4676 },
4677 #endif /* CONFIG_SCHEDSTATS */
4678 #ifdef CONFIG_UCLAMP_TASK
4679 {
4680 .procname = "sched_util_clamp_min",
4681 .data = &sysctl_sched_uclamp_util_min,
4682 .maxlen = sizeof(unsigned int),
4683 .mode = 0644,
4684 .proc_handler = sysctl_sched_uclamp_handler,
4685 },
4686 {
4687 .procname = "sched_util_clamp_max",
4688 .data = &sysctl_sched_uclamp_util_max,
4689 .maxlen = sizeof(unsigned int),
4690 .mode = 0644,
4691 .proc_handler = sysctl_sched_uclamp_handler,
4692 },
4693 {
4694 .procname = "sched_util_clamp_min_rt_default",
4695 .data = &sysctl_sched_uclamp_util_min_rt_default,
4696 .maxlen = sizeof(unsigned int),
4697 .mode = 0644,
4698 .proc_handler = sysctl_sched_uclamp_handler,
4699 },
4700 #endif /* CONFIG_UCLAMP_TASK */
4701 #ifdef CONFIG_NUMA_BALANCING
4702 {
4703 .procname = "numa_balancing",
4704 .data = NULL, /* filled in by handler */
4705 .maxlen = sizeof(unsigned int),
4706 .mode = 0644,
4707 .proc_handler = sysctl_numa_balancing,
4708 .extra1 = SYSCTL_ZERO,
4709 .extra2 = SYSCTL_FOUR,
4710 },
4711 #endif /* CONFIG_NUMA_BALANCING */
4712 {}
4713 };
4714 static int __init sched_core_sysctl_init(void)
4715 {
4716 register_sysctl_init("kernel", sched_core_sysctls);
4717 return 0;
4718 }
4719 late_initcall(sched_core_sysctl_init);
4720 #endif /* CONFIG_SYSCTL */
4721
4722 /*
4723 * fork()/clone()-time setup:
4724 */
4725 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4726 {
4727 __sched_fork(clone_flags, p);
4728 /*
4729 * We mark the process as NEW here. This guarantees that
4730 * nobody will actually run it, and a signal or other external
4731 * event cannot wake it up and insert it on the runqueue either.
4732 */
4733 p->__state = TASK_NEW;
4734
4735 /*
4736 * Make sure we do not leak PI boosting priority to the child.
4737 */
4738 p->prio = current->normal_prio;
4739
4740 uclamp_fork(p);
4741
4742 /*
4743 * Revert to default priority/policy on fork if requested.
4744 */
4745 if (unlikely(p->sched_reset_on_fork)) {
4746 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4747 p->policy = SCHED_NORMAL;
4748 p->static_prio = NICE_TO_PRIO(0);
4749 p->rt_priority = 0;
4750 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4751 p->static_prio = NICE_TO_PRIO(0);
4752
4753 p->prio = p->normal_prio = p->static_prio;
4754 set_load_weight(p, false);
4755
4756 /*
4757 * We don't need the reset flag anymore after the fork. It has
4758 * fulfilled its duty:
4759 */
4760 p->sched_reset_on_fork = 0;
4761 }
4762
4763 if (dl_prio(p->prio))
4764 return -EAGAIN;
4765 else if (rt_prio(p->prio))
4766 p->sched_class = &rt_sched_class;
4767 else
4768 p->sched_class = &fair_sched_class;
4769
4770 init_entity_runnable_average(&p->se);
4771
4772
4773 #ifdef CONFIG_SCHED_INFO
4774 if (likely(sched_info_on()))
4775 memset(&p->sched_info, 0, sizeof(p->sched_info));
4776 #endif
4777 #if defined(CONFIG_SMP)
4778 p->on_cpu = 0;
4779 #endif
4780 init_task_preempt_count(p);
4781 #ifdef CONFIG_SMP
4782 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4783 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4784 #endif
4785 return 0;
4786 }
4787
4788 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4789 {
4790 unsigned long flags;
4791
4792 /*
4793 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4794 * required yet, but lockdep gets upset if rules are violated.
4795 */
4796 raw_spin_lock_irqsave(&p->pi_lock, flags);
4797 #ifdef CONFIG_CGROUP_SCHED
4798 if (1) {
4799 struct task_group *tg;
4800 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4801 struct task_group, css);
4802 tg = autogroup_task_group(p, tg);
4803 p->sched_task_group = tg;
4804 }
4805 #endif
4806 rseq_migrate(p);
4807 /*
4808 * We're setting the CPU for the first time, we don't migrate,
4809 * so use __set_task_cpu().
4810 */
4811 __set_task_cpu(p, smp_processor_id());
4812 if (p->sched_class->task_fork)
4813 p->sched_class->task_fork(p);
4814 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4815 }
4816
4817 void sched_post_fork(struct task_struct *p)
4818 {
4819 uclamp_post_fork(p);
4820 }
4821
4822 unsigned long to_ratio(u64 period, u64 runtime)
4823 {
4824 if (runtime == RUNTIME_INF)
4825 return BW_UNIT;
4826
4827 /*
4828 * Doing this here saves a lot of checks in all
4829 * the calling paths, and returning zero seems
4830 * safe for them anyway.
4831 */
4832 if (period == 0)
4833 return 0;
4834
4835 return div64_u64(runtime << BW_SHIFT, period);
4836 }
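/*
 * Worked example (illustration only, assuming BW_SHIFT == 20 and
 * BW_UNIT == 1 << 20 as defined in sched.h): a runtime of 5ms out of a 10ms
 * period gives
 *
 *        to_ratio(10 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC)
 *                = (5000000 << 20) / 10000000 = 524288 = BW_UNIT / 2
 *
 * i.e. 50% of the unit bandwidth.
 */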
4837
4838 /*
4839 * wake_up_new_task - wake up a newly created task for the first time.
4840 *
4841 * This function will do some initial scheduler statistics housekeeping
4842 * that must be done for every newly created context, then puts the task
4843 * on the runqueue and wakes it.
4844 */
4845 void wake_up_new_task(struct task_struct *p)
4846 {
4847 struct rq_flags rf;
4848 struct rq *rq;
4849
4850 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4851 WRITE_ONCE(p->__state, TASK_RUNNING);
4852 #ifdef CONFIG_SMP
4853 /*
4854 * Fork balancing, do it here and not earlier because:
4855 * - cpus_ptr can change in the fork path
4856 * - any previously selected CPU might disappear through hotplug
4857 *
4858 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4859 * as we're not fully set-up yet.
4860 */
4861 p->recent_used_cpu = task_cpu(p);
4862 rseq_migrate(p);
4863 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4864 #endif
4865 rq = __task_rq_lock(p, &rf);
4866 update_rq_clock(rq);
4867 post_init_entity_util_avg(p);
4868
4869 activate_task(rq, p, ENQUEUE_NOCLOCK);
4870 trace_sched_wakeup_new(p);
4871 wakeup_preempt(rq, p, WF_FORK);
4872 #ifdef CONFIG_SMP
4873 if (p->sched_class->task_woken) {
4874 /*
4875 * Nothing relies on rq->lock after this, so it's fine to
4876 * drop it.
4877 */
4878 rq_unpin_lock(rq, &rf);
4879 p->sched_class->task_woken(rq, p);
4880 rq_repin_lock(rq, &rf);
4881 }
4882 #endif
4883 task_rq_unlock(rq, p, &rf);
4884 }
4885
4886 #ifdef CONFIG_PREEMPT_NOTIFIERS
4887
4888 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4889
4890 void preempt_notifier_inc(void)
4891 {
4892 static_branch_inc(&preempt_notifier_key);
4893 }
4894 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4895
4896 void preempt_notifier_dec(void)
4897 {
4898 static_branch_dec(&preempt_notifier_key);
4899 }
4900 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4901
4902 /**
4903 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4904 * @notifier: notifier struct to register
4905 */
4906 void preempt_notifier_register(struct preempt_notifier *notifier)
4907 {
4908 if (!static_branch_unlikely(&preempt_notifier_key))
4909 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4910
4911 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4912 }
4913 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4914
4915 /**
4916 * preempt_notifier_unregister - no longer interested in preemption notifications
4917 * @notifier: notifier struct to unregister
4918 *
4919 * This is *not* safe to call from within a preemption notifier.
4920 */
4921 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4922 {
4923 hlist_del(&notifier->link);
4924 }
4925 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
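/*
 * Illustrative registration sketch (hypothetical names, not from this file);
 * KVM-style users bump the static key once and then register a notifier for
 * the current task:
 *
 *        static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
 *        static void my_sched_out(struct preempt_notifier *pn,
 *                                 struct task_struct *next) { }
 *        static struct preempt_ops my_preempt_ops = {
 *                .sched_in  = my_sched_in,
 *                .sched_out = my_sched_out,
 *        };
 *        static struct preempt_notifier my_notifier;
 *
 *        preempt_notifier_inc();
 *        preempt_notifier_init(&my_notifier, &my_preempt_ops);
 *        preempt_notifier_register(&my_notifier);  // adds to current
 */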
4926
4927 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4928 {
4929 struct preempt_notifier *notifier;
4930
4931 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4932 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4933 }
4934
4935 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4936 {
4937 if (static_branch_unlikely(&preempt_notifier_key))
4938 __fire_sched_in_preempt_notifiers(curr);
4939 }
4940
4941 static void
4942 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4943 struct task_struct *next)
4944 {
4945 struct preempt_notifier *notifier;
4946
4947 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4948 notifier->ops->sched_out(notifier, next);
4949 }
4950
4951 static __always_inline void
4952 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4953 struct task_struct *next)
4954 {
4955 if (static_branch_unlikely(&preempt_notifier_key))
4956 __fire_sched_out_preempt_notifiers(curr, next);
4957 }
4958
4959 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4960
4961 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4962 {
4963 }
4964
4965 static inline void
4966 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4967 struct task_struct *next)
4968 {
4969 }
4970
4971 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4972
4973 static inline void prepare_task(struct task_struct *next)
4974 {
4975 #ifdef CONFIG_SMP
4976 /*
4977 * Claim the task as running, we do this before switching to it
4978 * such that any running task will have this set.
4979 *
4980 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4981 * its ordering comment.
4982 */
4983 WRITE_ONCE(next->on_cpu, 1);
4984 #endif
4985 }
4986
4987 static inline void finish_task(struct task_struct *prev)
4988 {
4989 #ifdef CONFIG_SMP
4990 /*
4991 * This must be the very last reference to @prev from this CPU. After
4992 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4993 * must ensure this doesn't happen until the switch is completely
4994 * finished.
4995 *
4996 * In particular, the load of prev->state in finish_task_switch() must
4997 * happen before this.
4998 *
4999 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5000 */
5001 smp_store_release(&prev->on_cpu, 0);
5002 #endif
5003 }
5004
5005 #ifdef CONFIG_SMP
5006
5007 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5008 {
5009 void (*func)(struct rq *rq);
5010 struct balance_callback *next;
5011
5012 lockdep_assert_rq_held(rq);
5013
5014 while (head) {
5015 func = (void (*)(struct rq *))head->func;
5016 next = head->next;
5017 head->next = NULL;
5018 head = next;
5019
5020 func(rq);
5021 }
5022 }
5023
5024 static void balance_push(struct rq *rq);
5025
5026 /*
5027 * balance_push_callback is a right abuse of the callback interface and plays
5028 * by significantly different rules.
5029 *
5030 * Where the normal balance_callback's purpose is to be run in the same context
5031 * that queued it (only later, when it's safe to drop rq->lock again),
5032 * balance_push_callback is specifically targeted at __schedule().
5033 *
5034 * This abuse is tolerated because it places all the unlikely/odd cases behind
5035 * a single test, namely: rq->balance_callback == NULL.
5036 */
5037 struct balance_callback balance_push_callback = {
5038 .next = NULL,
5039 .func = balance_push,
5040 };
5041
5042 static inline struct balance_callback *
5043 __splice_balance_callbacks(struct rq *rq, bool split)
5044 {
5045 struct balance_callback *head = rq->balance_callback;
5046
5047 if (likely(!head))
5048 return NULL;
5049
5050 lockdep_assert_rq_held(rq);
5051 /*
5052 * Must not take balance_push_callback off the list when
5053 * splice_balance_callbacks() and balance_callbacks() are not
5054 * in the same rq->lock section.
5055 *
5056 * In that case it would be possible for __schedule() to interleave
5057 * and observe the list empty.
5058 */
5059 if (split && head == &balance_push_callback)
5060 head = NULL;
5061 else
5062 rq->balance_callback = NULL;
5063
5064 return head;
5065 }
5066
5067 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
5068 {
5069 return __splice_balance_callbacks(rq, true);
5070 }
5071
5072 static void __balance_callbacks(struct rq *rq)
5073 {
5074 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5075 }
5076
5077 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5078 {
5079 unsigned long flags;
5080
5081 if (unlikely(head)) {
5082 raw_spin_rq_lock_irqsave(rq, flags);
5083 do_balance_callbacks(rq, head);
5084 raw_spin_rq_unlock_irqrestore(rq, flags);
5085 }
5086 }
5087
5088 #else
5089
5090 static inline void __balance_callbacks(struct rq *rq)
5091 {
5092 }
5093
5094 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
5095 {
5096 return NULL;
5097 }
5098
5099 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5100 {
5101 }
5102
5103 #endif
5104
5105 static inline void
5106 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5107 {
5108 /*
5109 * The runqueue lock will be released by the next
5110 * task (which is an invalid locking op but in the case
5111 * of the scheduler it's an obvious special-case), so we
5112 * do an early lockdep release here:
5113 */
5114 rq_unpin_lock(rq, rf);
5115 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5116 #ifdef CONFIG_DEBUG_SPINLOCK
5117 /* this is a valid case when another task releases the spinlock */
5118 rq_lockp(rq)->owner = next;
5119 #endif
5120 }
5121
5122 static inline void finish_lock_switch(struct rq *rq)
5123 {
5124 /*
5125 * If we are tracking spinlock dependencies then we have to
5126 * fix up the runqueue lock - which gets 'carried over' from
5127 * prev into current:
5128 */
5129 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5130 __balance_callbacks(rq);
5131 raw_spin_rq_unlock_irq(rq);
5132 }
5133
5134 /*
5135 * NOP if the arch has not defined these:
5136 */
5137
5138 #ifndef prepare_arch_switch
5139 # define prepare_arch_switch(next) do { } while (0)
5140 #endif
5141
5142 #ifndef finish_arch_post_lock_switch
5143 # define finish_arch_post_lock_switch() do { } while (0)
5144 #endif
5145
5146 static inline void kmap_local_sched_out(void)
5147 {
5148 #ifdef CONFIG_KMAP_LOCAL
5149 if (unlikely(current->kmap_ctrl.idx))
5150 __kmap_local_sched_out();
5151 #endif
5152 }
5153
5154 static inline void kmap_local_sched_in(void)
5155 {
5156 #ifdef CONFIG_KMAP_LOCAL
5157 if (unlikely(current->kmap_ctrl.idx))
5158 __kmap_local_sched_in();
5159 #endif
5160 }
5161
5162 /**
5163 * prepare_task_switch - prepare to switch tasks
5164 * @rq: the runqueue preparing to switch
5165 * @prev: the current task that is being switched out
5166 * @next: the task we are going to switch to.
5167 *
5168 * This is called with the rq lock held and interrupts off. It must
5169 * be paired with a subsequent finish_task_switch after the context
5170 * switch.
5171 *
5172 * prepare_task_switch sets up locking and calls architecture specific
5173 * hooks.
5174 */
5175 static inline void
5176 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5177 struct task_struct *next)
5178 {
5179 kcov_prepare_switch(prev);
5180 sched_info_switch(rq, prev, next);
5181 perf_event_task_sched_out(prev, next);
5182 rseq_preempt(prev);
5183 fire_sched_out_preempt_notifiers(prev, next);
5184 kmap_local_sched_out();
5185 prepare_task(next);
5186 prepare_arch_switch(next);
5187 }
5188
5189 /**
5190 * finish_task_switch - clean up after a task-switch
5191 * @prev: the thread we just switched away from.
5192 *
5193 * finish_task_switch must be called after the context switch, paired
5194 * with a prepare_task_switch call before the context switch.
5195 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5196 * and do any other architecture-specific cleanup actions.
5197 *
5198 * Note that we may have delayed dropping an mm in context_switch(). If
5199 * so, we finish that here outside of the runqueue lock. (Doing it
5200 * with the lock held can cause deadlocks; see schedule() for
5201 * details.)
5202 *
5203 * The context switch has flipped the stack from under us and restored the
5204 * local variables which were saved when this task called schedule() in the
5205 * past. prev == current is still correct but we need to recalculate this_rq
5206 * because prev may have moved to another CPU.
5207 */
5208 static struct rq *finish_task_switch(struct task_struct *prev)
5209 __releases(rq->lock)
5210 {
5211 struct rq *rq = this_rq();
5212 struct mm_struct *mm = rq->prev_mm;
5213 unsigned int prev_state;
5214
5215 /*
5216 * The previous task will have left us with a preempt_count of 2
5217 * because it left us after:
5218 *
5219 * schedule()
5220 * preempt_disable(); // 1
5221 * __schedule()
5222 * raw_spin_lock_irq(&rq->lock) // 2
5223 *
5224 * Also, see FORK_PREEMPT_COUNT.
5225 */
5226 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5227 "corrupted preempt_count: %s/%d/0x%x\n",
5228 current->comm, current->pid, preempt_count()))
5229 preempt_count_set(FORK_PREEMPT_COUNT);
5230
5231 rq->prev_mm = NULL;
5232
5233 /*
5234 * A task struct has one reference for the use as "current".
5235 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5236 * schedule one last time. The schedule call will never return, and
5237 * the scheduled task must drop that reference.
5238 *
5239 * We must observe prev->state before clearing prev->on_cpu (in
5240 * finish_task), otherwise a concurrent wakeup can get prev
5241 * running on another CPU and we could race with its RUNNING -> DEAD
5242 * transition, resulting in a double drop.
5243 */
5244 prev_state = READ_ONCE(prev->__state);
5245 vtime_task_switch(prev);
5246 perf_event_task_sched_in(prev, current);
5247 finish_task(prev);
5248 tick_nohz_task_switch();
5249 finish_lock_switch(rq);
5250 finish_arch_post_lock_switch();
5251 kcov_finish_switch(current);
5252 /*
5253 * kmap_local_sched_out() is invoked with rq::lock held and
5254 * interrupts disabled. There is no requirement for that, but the
5255 * sched out code does not have an interrupt enabled section.
5256 * Restoring the maps on sched in does not require interrupts being
5257 * disabled either.
5258 */
5259 kmap_local_sched_in();
5260
5261 fire_sched_in_preempt_notifiers(current);
5262 /*
5263 * When switching through a kernel thread, the loop in
5264 * membarrier_{private,global}_expedited() may have observed that
5265 * kernel thread and not issued an IPI. It is therefore possible to
5266 * schedule between user->kernel->user threads without passing though
5267 * switch_mm(). Membarrier requires a barrier after storing to
5268 * rq->curr, before returning to userspace, so provide them here:
5269 *
5270 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5271 * provided by mmdrop_lazy_tlb(),
5272 * - a sync_core for SYNC_CORE.
5273 */
5274 if (mm) {
5275 membarrier_mm_sync_core_before_usermode(mm);
5276 mmdrop_lazy_tlb_sched(mm);
5277 }
5278
5279 if (unlikely(prev_state == TASK_DEAD)) {
5280 if (prev->sched_class->task_dead)
5281 prev->sched_class->task_dead(prev);
5282
5283 /* Task is done with its stack. */
5284 put_task_stack(prev);
5285
5286 put_task_struct_rcu_user(prev);
5287 }
5288
5289 return rq;
5290 }
5291
5292 /**
5293 * schedule_tail - first thing a freshly forked thread must call.
5294 * @prev: the thread we just switched away from.
5295 */
5296 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5297 __releases(rq->lock)
5298 {
5299 /*
5300 * New tasks start with FORK_PREEMPT_COUNT, see there and
5301 * finish_task_switch() for details.
5302 *
5303 * finish_task_switch() will drop rq->lock() and lower preempt_count
5304 * and the preempt_enable() will end up enabling preemption (on
5305 * PREEMPT_COUNT kernels).
5306 */
5307
5308 finish_task_switch(prev);
5309 preempt_enable();
5310
5311 if (current->set_child_tid)
5312 put_user(task_pid_vnr(current), current->set_child_tid);
5313
5314 calculate_sigpending();
5315 }
5316
5317 /*
5318 * context_switch - switch to the new MM and the new thread's register state.
5319 */
5320 static __always_inline struct rq *
5321 context_switch(struct rq *rq, struct task_struct *prev,
5322 struct task_struct *next, struct rq_flags *rf)
5323 {
5324 prepare_task_switch(rq, prev, next);
5325
5326 /*
5327 * For paravirt, this is coupled with an exit in switch_to to
5328 * combine the page table reload and the switch backend into
5329 * one hypercall.
5330 */
5331 arch_start_context_switch(prev);
5332
5333 /*
5334 * kernel -> kernel lazy + transfer active
5335 * user -> kernel lazy + mmgrab_lazy_tlb() active
5336 *
5337 * kernel -> user switch + mmdrop_lazy_tlb() active
5338 * user -> user switch
5339 *
5340 * switch_mm_cid() needs to be updated if the barriers provided
5341 * by context_switch() are modified.
5342 */
5343 if (!next->mm) { // to kernel
5344 enter_lazy_tlb(prev->active_mm, next);
5345
5346 next->active_mm = prev->active_mm;
5347 if (prev->mm) // from user
5348 mmgrab_lazy_tlb(prev->active_mm);
5349 else
5350 prev->active_mm = NULL;
5351 } else { // to user
5352 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5353 /*
5354 * sys_membarrier() requires an smp_mb() between setting
5355 * rq->curr / membarrier_switch_mm() and returning to userspace.
5356 *
5357 * The below provides this either through switch_mm(), or in
5358 * case 'prev->active_mm == next->mm' through
5359 * finish_task_switch()'s mmdrop().
5360 */
5361 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5362 lru_gen_use_mm(next->mm);
5363
5364 if (!prev->mm) { // from kernel
5365 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5366 rq->prev_mm = prev->active_mm;
5367 prev->active_mm = NULL;
5368 }
5369 }
5370
5371 /* switch_mm_cid() requires the memory barriers above. */
5372 switch_mm_cid(rq, prev, next);
5373
5374 prepare_lock_switch(rq, next, rf);
5375
5376 /* Here we just switch the register state and the stack. */
5377 switch_to(prev, next, prev);
5378 barrier();
5379
5380 return finish_task_switch(prev);
5381 }
5382
5383 /*
5384 * nr_running and nr_context_switches:
5385 *
5386 * externally visible scheduler statistics: current number of runnable
5387 * threads, total number of context switches performed since bootup.
5388 */
5389 unsigned int nr_running(void)
5390 {
5391 unsigned int i, sum = 0;
5392
5393 for_each_online_cpu(i)
5394 sum += cpu_rq(i)->nr_running;
5395
5396 return sum;
5397 }
5398
5399 /*
5400 * Check if only the current task is running on the CPU.
5401 *
5402 * Caution: this function does not check that the caller has disabled
5403 * preemption, thus the result might have a time-of-check-to-time-of-use
5404 * race. The caller is responsible for using it correctly, for example:
5405 *
5406 * - from a non-preemptible section (of course)
5407 *
5408 * - from a thread that is bound to a single CPU
5409 *
5410 * - in a loop with very short iterations (e.g. a polling loop)
5411 */
5412 bool single_task_running(void)
5413 {
5414 return raw_rq()->nr_running == 1;
5415 }
5416 EXPORT_SYMBOL(single_task_running);
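/*
 * Minimal usage sketch for the polling-loop case mentioned above; the
 * poll helper is a hypothetical placeholder, not an existing function:
 *
 *	while (!done) {
 *		if (single_task_running())
 *			poll_for_work();	// hypothetical busy poll
 *		else
 *			cond_resched();		// give other tasks a chance
 *	}
 */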
5417
5418 unsigned long long nr_context_switches_cpu(int cpu)
5419 {
5420 return cpu_rq(cpu)->nr_switches;
5421 }
5422
5423 unsigned long long nr_context_switches(void)
5424 {
5425 int i;
5426 unsigned long long sum = 0;
5427
5428 for_each_possible_cpu(i)
5429 sum += cpu_rq(i)->nr_switches;
5430
5431 return sum;
5432 }
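/*
 * Sketch of a sampling consumer (assumed, not an existing in-kernel user):
 * two reads spaced one second apart give a system-wide switch rate:
 *
 *	u64 a = nr_context_switches();
 *	msleep(1000);
 *	u64 b = nr_context_switches();
 *	// b - a is roughly the number of context switches per second
 */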
5433
5434 /*
5435 * Consumers of these two interfaces, like for example the cpuidle menu
5436 * governor, are using nonsensical data: they prefer a shallow idle state for a
5437 * CPU with IO-wait pending, even though the blocked task might not even end up
5438 * running on that CPU once it becomes runnable.
5439 */
5440
5441 unsigned int nr_iowait_cpu(int cpu)
5442 {
5443 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5444 }
5445
5446 /*
5447 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5448 *
5449 * The idea behind IO-wait accounting is to account the idle time that we could
5450 * have spent running if it were not for IO. That is, if we were to improve the
5451 * storage performance, we'd have a proportional reduction in IO-wait time.
5452 *
5453 * This all works nicely on UP, where, when a task blocks on IO, we account
5454 * idle time as IO-wait, because if the storage were faster, it could've been
5455 * running and we'd not be idle.
5456 *
5457 * This has been extended to SMP, by doing the same for each CPU. This however
5458 * is broken.
5459 *
5460 * Imagine for instance the case where two tasks block on one CPU, only the one
5461 * CPU will have IO-wait accounted, while the other has regular idle. Even
5462 * though, if the storage were faster, both could've run at the same time,
5463 * utilising both CPUs.
5464 *
5465 * This means that, when looking globally, the current IO-wait accounting on
5466 * SMP is a lower bound, due to under-accounting.
5467 *
5468 * Worse, since the numbers are provided per CPU, they are sometimes
5469 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5470 * associated with any one particular CPU; it can wake up on a different CPU
5471 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5472 *
5473 * Task CPU affinities can make all that even more 'interesting'.
5474 */
5475
5476 unsigned int nr_iowait(void)
5477 {
5478 unsigned int i, sum = 0;
5479
5480 for_each_possible_cpu(i)
5481 sum += nr_iowait_cpu(i);
5482
5483 return sum;
5484 }
5485
5486 #ifdef CONFIG_SMP
5487
5488 /*
5489 * sched_exec - execve() is a valuable balancing opportunity, because at
5490 * this point the task has the smallest effective memory and cache footprint.
5491 */
5492 void sched_exec(void)
5493 {
5494 struct task_struct *p = current;
5495 struct migration_arg arg;
5496 int dest_cpu;
5497
5498 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5499 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5500 if (dest_cpu == smp_processor_id())
5501 return;
5502
5503 if (unlikely(!cpu_active(dest_cpu)))
5504 return;
5505
5506 arg = (struct migration_arg){ p, dest_cpu };
5507 }
5508 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5509 }
5510
5511 #endif
5512
5513 DEFINE_PER_CPU(struct kernel_stat, kstat);
5514 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5515
5516 EXPORT_PER_CPU_SYMBOL(kstat);
5517 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5518
5519 /*
5520 * The function fair_sched_class.update_curr accesses the struct curr
5521 * and its field curr->exec_start; when called from task_sched_runtime(),
5522 * we observe a high rate of cache misses in practice.
5523 * Prefetching this data results in improved performance.
5524 */
5525 static inline void prefetch_curr_exec_start(struct task_struct *p)
5526 {
5527 #ifdef CONFIG_FAIR_GROUP_SCHED
5528 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
5529 #else
5530 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
5531 #endif
5532 prefetch(curr);
5533 prefetch(&curr->exec_start);
5534 }
5535
5536 /*
5537 * Return accounted runtime for the task.
5538 * In case the task is currently running, return the runtime plus current's
5539 * pending runtime that has not been accounted yet.
5540 */
5541 unsigned long long task_sched_runtime(struct task_struct *p)
5542 {
5543 struct rq_flags rf;
5544 struct rq *rq;
5545 u64 ns;
5546
5547 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5548 /*
5549 * 64-bit doesn't need locks to atomically read a 64-bit value.
5550 * So we have an optimization chance when the task's delta_exec is 0.
5551 * Reading ->on_cpu is racy, but this is ok.
5552 *
5553 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5554 * If we race with it entering CPU, unaccounted time is 0. This is
5555 * indistinguishable from the read occurring a few cycles earlier.
5556 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5557 * been accounted, so we're correct here as well.
5558 */
5559 if (!p->on_cpu || !task_on_rq_queued(p))
5560 return p->se.sum_exec_runtime;
5561 #endif
5562
5563 rq = task_rq_lock(p, &rf);
5564 /*
5565 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5566 * project cycles that may never be accounted to this
5567 * thread, breaking clock_gettime().
5568 */
5569 if (task_current(rq, p) && task_on_rq_queued(p)) {
5570 prefetch_curr_exec_start(p);
5571 update_rq_clock(rq);
5572 p->sched_class->update_curr(rq);
5573 }
5574 ns = p->se.sum_exec_runtime;
5575 task_rq_unlock(rq, p, &rf);
5576
5577 return ns;
5578 }
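/*
 * One notable consumer (illustrative): the POSIX CPU-clock code. A userspace
 * call such as
 *
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * ends up sampling this value for the thread's CPUCLOCK_SCHED clock, which is
 * why projecting never-to-be-accounted cycles here would break clock_gettime().
 */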
5579
5580 #ifdef CONFIG_SCHED_DEBUG
5581 static u64 cpu_resched_latency(struct rq *rq)
5582 {
5583 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5584 u64 resched_latency, now = rq_clock(rq);
5585 static bool warned_once;
5586
5587 if (sysctl_resched_latency_warn_once && warned_once)
5588 return 0;
5589
5590 if (!need_resched() || !latency_warn_ms)
5591 return 0;
5592
5593 if (system_state == SYSTEM_BOOTING)
5594 return 0;
5595
5596 if (!rq->last_seen_need_resched_ns) {
5597 rq->last_seen_need_resched_ns = now;
5598 rq->ticks_without_resched = 0;
5599 return 0;
5600 }
5601
5602 rq->ticks_without_resched++;
5603 resched_latency = now - rq->last_seen_need_resched_ns;
5604 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5605 return 0;
5606
5607 warned_once = true;
5608
5609 return resched_latency;
5610 }
5611
5612 static int __init setup_resched_latency_warn_ms(char *str)
5613 {
5614 long val;
5615
5616 if ((kstrtol(str, 0, &val))) {
5617 pr_warn("Unable to set resched_latency_warn_ms\n");
5618 return 1;
5619 }
5620
5621 sysctl_resched_latency_warn_ms = val;
5622 return 1;
5623 }
5624 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5625 #else
5626 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5627 #endif /* CONFIG_SCHED_DEBUG */
5628
5629 /*
5630 * This function gets called by the timer code, with HZ frequency.
5631 * We call it with interrupts disabled.
5632 */
5633 void scheduler_tick(void)
5634 {
5635 int cpu = smp_processor_id();
5636 struct rq *rq = cpu_rq(cpu);
5637 struct task_struct *curr;
5638 struct rq_flags rf;
5639 unsigned long thermal_pressure;
5640 u64 resched_latency;
5641
5642 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5643 arch_scale_freq_tick();
5644
5645 sched_clock_tick();
5646
5647 rq_lock(rq, &rf);
5648
5649 curr = rq->curr;
5650 psi_account_irqtime(rq, curr, NULL);
5651
5652 update_rq_clock(rq);
5653 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
5654 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
5655 curr->sched_class->task_tick(rq, curr, 0);
5656 if (sched_feat(LATENCY_WARN))
5657 resched_latency = cpu_resched_latency(rq);
5658 calc_global_load_tick(rq);
5659 sched_core_tick(rq);
5660 task_tick_mm_cid(rq, curr);
5661
5662 rq_unlock(rq, &rf);
5663
5664 if (sched_feat(LATENCY_WARN) && resched_latency)
5665 resched_latency_warn(cpu, resched_latency);
5666
5667 perf_event_task_tick();
5668
5669 if (curr->flags & PF_WQ_WORKER)
5670 wq_worker_tick(curr);
5671
5672 #ifdef CONFIG_SMP
5673 rq->idle_balance = idle_cpu(cpu);
5674 trigger_load_balance(rq);
5675 #endif
5676 }
5677
5678 #ifdef CONFIG_NO_HZ_FULL
5679
5680 struct tick_work {
5681 int cpu;
5682 atomic_t state;
5683 struct delayed_work work;
5684 };
5685 /* Values for ->state, see diagram below. */
5686 #define TICK_SCHED_REMOTE_OFFLINE 0
5687 #define TICK_SCHED_REMOTE_OFFLINING 1
5688 #define TICK_SCHED_REMOTE_RUNNING 2
5689
5690 /*
5691 * State diagram for ->state:
5692 *
5693 *
5694 * TICK_SCHED_REMOTE_OFFLINE
5695 * | ^
5696 * | |
5697 * | | sched_tick_remote()
5698 * | |
5699 * | |
5700 * +--TICK_SCHED_REMOTE_OFFLINING
5701 * | ^
5702 * | |
5703 * sched_tick_start() | | sched_tick_stop()
5704 * | |
5705 * V |
5706 * TICK_SCHED_REMOTE_RUNNING
5707 *
5708 *
5709 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5710 * and sched_tick_start() are happy to leave the state in RUNNING.
5711 */
5712
5713 static struct tick_work __percpu *tick_work_cpu;
5714
5715 static void sched_tick_remote(struct work_struct *work)
5716 {
5717 struct delayed_work *dwork = to_delayed_work(work);
5718 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5719 int cpu = twork->cpu;
5720 struct rq *rq = cpu_rq(cpu);
5721 int os;
5722
5723 /*
5724 * Handle the tick only if it appears the remote CPU is running in full
5725 * dynticks mode. The check is racy by nature, but missing a tick or
5726 * having one too many is no big deal because the scheduler tick updates
5727 * statistics and checks timeslices in a time-independent way, regardless
5728 * of when exactly it is running.
5729 */
5730 if (tick_nohz_tick_stopped_cpu(cpu)) {
5731 guard(rq_lock_irq)(rq);
5732 struct task_struct *curr = rq->curr;
5733
5734 if (cpu_online(cpu)) {
5735 update_rq_clock(rq);
5736
5737 if (!is_idle_task(curr)) {
5738 /*
5739 * Make sure the next tick runs within a
5740 * reasonable amount of time.
5741 */
5742 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5743 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5744 }
5745 curr->sched_class->task_tick(rq, curr, 0);
5746
5747 calc_load_nohz_remote(rq);
5748 }
5749 }
5750
5751 /*
5752 * Run the remote tick once per second (1Hz). This arbitrary
5753 * frequency is low enough to avoid overload but high enough
5754 * to keep the scheduler-internal stats reasonably up to date. But
5755 * first update state to reflect hotplug activity if required.
5756 */
5757 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5758 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5759 if (os == TICK_SCHED_REMOTE_RUNNING)
5760 queue_delayed_work(system_unbound_wq, dwork, HZ);
5761 }
5762
5763 static void sched_tick_start(int cpu)
5764 {
5765 int os;
5766 struct tick_work *twork;
5767
5768 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5769 return;
5770
5771 WARN_ON_ONCE(!tick_work_cpu);
5772
5773 twork = per_cpu_ptr(tick_work_cpu, cpu);
5774 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5775 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5776 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5777 twork->cpu = cpu;
5778 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5779 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5780 }
5781 }
5782
5783 #ifdef CONFIG_HOTPLUG_CPU
5784 static void sched_tick_stop(int cpu)
5785 {
5786 struct tick_work *twork;
5787 int os;
5788
5789 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5790 return;
5791
5792 WARN_ON_ONCE(!tick_work_cpu);
5793
5794 twork = per_cpu_ptr(tick_work_cpu, cpu);
5795 /* There cannot be competing actions, but don't rely on stop-machine. */
5796 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5797 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5798 /* Don't cancel, as this would mess up the state machine. */
5799 }
5800 #endif /* CONFIG_HOTPLUG_CPU */
5801
5802 int __init sched_tick_offload_init(void)
5803 {
5804 tick_work_cpu = alloc_percpu(struct tick_work);
5805 BUG_ON(!tick_work_cpu);
5806 return 0;
5807 }
5808
5809 #else /* !CONFIG_NO_HZ_FULL */
5810 static inline void sched_tick_start(int cpu) { }
5811 static inline void sched_tick_stop(int cpu) { }
5812 #endif
5813
5814 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5815 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5816 /*
5817 * If the value passed in is equal to the current preempt count
5818 * then we just disabled preemption. Start timing the latency.
5819 */
5820 static inline void preempt_latency_start(int val)
5821 {
5822 if (preempt_count() == val) {
5823 unsigned long ip = get_lock_parent_ip();
5824 #ifdef CONFIG_DEBUG_PREEMPT
5825 current->preempt_disable_ip = ip;
5826 #endif
5827 trace_preempt_off(CALLER_ADDR0, ip);
5828 }
5829 }
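/*
 * Worked example of the check above (illustrative): on a kernel with
 * CONFIG_DEBUG_PREEMPT or preempt tracing enabled, preempt_disable() maps to
 * preempt_count_add(1), so:
 *
 *	preempt_disable();	// count 0 -> 1, 1 == val(1): start timing
 *	preempt_disable();	// count 1 -> 2, 2 != val(1): nested, no-op
 *
 * i.e. only the outermost disable starts the latency measurement.
 */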
5830
5831 void preempt_count_add(int val)
5832 {
5833 #ifdef CONFIG_DEBUG_PREEMPT
5834 /*
5835 * Underflow?
5836 */
5837 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5838 return;
5839 #endif
5840 __preempt_count_add(val);
5841 #ifdef CONFIG_DEBUG_PREEMPT
5842 /*
5843 * Spinlock count overflowing soon?
5844 */
5845 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5846 PREEMPT_MASK - 10);
5847 #endif
5848 preempt_latency_start(val);
5849 }
5850 EXPORT_SYMBOL(preempt_count_add);
5851 NOKPROBE_SYMBOL(preempt_count_add);
5852
5853 /*
5854 * If the value passed in equals the current preempt count
5855 * then we just enabled preemption. Stop timing the latency.
5856 */
5857 static inline void preempt_latency_stop(int val)
5858 {
5859 if (preempt_count() == val)
5860 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5861 }
5862
5863 void preempt_count_sub(int val)
5864 {
5865 #ifdef CONFIG_DEBUG_PREEMPT
5866 /*
5867 * Underflow?
5868 */
5869 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5870 return;
5871 /*
5872 * Is the spinlock portion underflowing?
5873 */
5874 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5875 !(preempt_count() & PREEMPT_MASK)))
5876 return;
5877 #endif
5878
5879 preempt_latency_stop(val);
5880 __preempt_count_sub(val);
5881 }
5882 EXPORT_SYMBOL(preempt_count_sub);
5883 NOKPROBE_SYMBOL(preempt_count_sub);
5884
5885 #else
5886 static inline void preempt_latency_start(int val) { }
5887 static inline void preempt_latency_stop(int val) { }
5888 #endif
5889
5890 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5891 {
5892 #ifdef CONFIG_DEBUG_PREEMPT
5893 return p->preempt_disable_ip;
5894 #else
5895 return 0;
5896 #endif
5897 }
5898
5899 /*
5900 * Print scheduling while atomic bug:
5901 */
5902 static noinline void __schedule_bug(struct task_struct *prev)
5903 {
5904 /* Save this before calling printk(), since that will clobber it */
5905 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5906
5907 if (oops_in_progress)
5908 return;
5909
5910 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5911 prev->comm, prev->pid, preempt_count());
5912
5913 debug_show_held_locks(prev);
5914 print_modules();
5915 if (irqs_disabled())
5916 print_irqtrace_events(prev);
5917 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
5918 && in_atomic_preempt_off()) {
5919 pr_err("Preemption disabled at:");
5920 print_ip_sym(KERN_ERR, preempt_disable_ip);
5921 }
5922 check_panic_on_warn("scheduling while atomic");
5923
5924 dump_stack();
5925 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5926 }
5927
5928 /*
5929 * Various schedule()-time debugging checks and statistics:
5930 */
5931 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5932 {
5933 #ifdef CONFIG_SCHED_STACK_END_CHECK
5934 if (task_stack_end_corrupted(prev))
5935 panic("corrupted stack end detected inside scheduler\n");
5936
5937 if (task_scs_end_corrupted(prev))
5938 panic("corrupted shadow stack detected inside scheduler\n");
5939 #endif
5940
5941 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5942 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5943 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5944 prev->comm, prev->pid, prev->non_block_count);
5945 dump_stack();
5946 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5947 }
5948 #endif
5949
5950 if (unlikely(in_atomic_preempt_off())) {
5951 __schedule_bug(prev);
5952 preempt_count_set(PREEMPT_DISABLED);
5953 }
5954 rcu_sleep_check();
5955 SCHED_WARN_ON(ct_state() == CONTEXT_USER);
5956
5957 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5958
5959 schedstat_inc(this_rq()->sched_count);
5960 }
5961
5962 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
5963 struct rq_flags *rf)
5964 {
5965 #ifdef CONFIG_SMP
5966 const struct sched_class *class;
5967 /*
5968 * We must do the balancing pass before put_prev_task(), such
5969 * that when we release the rq->lock the task is in the same
5970 * state as before we took rq->lock.
5971 *
5972 * We can terminate the balance pass as soon as we know there is
5973 * a runnable task of @class priority or higher.
5974 */
5975 for_class_range(class, prev->sched_class, &idle_sched_class) {
5976 if (class->balance(rq, prev, rf))
5977 break;
5978 }
5979 #endif
5980
5981 put_prev_task(rq, prev);
5982 }
5983
5984 /*
5985 * Pick up the highest-prio task:
5986 */
5987 static inline struct task_struct *
5988 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5989 {
5990 const struct sched_class *class;
5991 struct task_struct *p;
5992
5993 /*
5994 * Optimization: we know that if all tasks are in the fair class we can
5995 * call that function directly, but only if the @prev task wasn't of a
5996 * higher scheduling class, because otherwise those lose the
5997 * opportunity to pull in more work from other CPUs.
5998 */
5999 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6000 rq->nr_running == rq->cfs.h_nr_running)) {
6001
6002 p = pick_next_task_fair(rq, prev, rf);
6003 if (unlikely(p == RETRY_TASK))
6004 goto restart;
6005
6006 /* Assume the next prioritized class is idle_sched_class */
6007 if (!p) {
6008 put_prev_task(rq, prev);
6009 p = pick_next_task_idle(rq);
6010 }
6011
6012 return p;
6013 }
6014
6015 restart:
6016 put_prev_task_balance(rq, prev, rf);
6017
6018 for_each_class(class) {
6019 p = class->pick_next_task(rq);
6020 if (p)
6021 return p;
6022 }
6023
6024 BUG(); /* The idle class should always have a runnable task. */
6025 }
6026
6027 #ifdef CONFIG_SCHED_CORE
6028 static inline bool is_task_rq_idle(struct task_struct *t)
6029 {
6030 return (task_rq(t)->idle == t);
6031 }
6032
6033 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6034 {
6035 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6036 }
6037
6038 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6039 {
6040 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6041 return true;
6042
6043 return a->core_cookie == b->core_cookie;
6044 }
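/*
 * Illustrative example: with core scheduling, two tasks tagged with the same
 * non-zero cookie (e.g. grouped via prctl(PR_SCHED_CORE)) may run on SMT
 * siblings of one core at the same time, while a task with a different cookie
 * may not. The idle task matches any cookie, which is what the
 * is_task_rq_idle() checks above encode.
 */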
6045
6046 static inline struct task_struct *pick_task(struct rq *rq)
6047 {
6048 const struct sched_class *class;
6049 struct task_struct *p;
6050
6051 for_each_class(class) {
6052 p = class->pick_task(rq);
6053 if (p)
6054 return p;
6055 }
6056
6057 BUG(); /* The idle class should always have a runnable task. */
6058 }
6059
6060 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6061
6062 static void queue_core_balance(struct rq *rq);
6063
6064 static struct task_struct *
6065 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6066 {
6067 struct task_struct *next, *p, *max = NULL;
6068 const struct cpumask *smt_mask;
6069 bool fi_before = false;
6070 bool core_clock_updated = (rq == rq->core);
6071 unsigned long cookie;
6072 int i, cpu, occ = 0;
6073 struct rq *rq_i;
6074 bool need_sync;
6075
6076 if (!sched_core_enabled(rq))
6077 return __pick_next_task(rq, prev, rf);
6078
6079 cpu = cpu_of(rq);
6080
6081 /* Stopper task is switching into idle, no need core-wide selection. */
6082 if (cpu_is_offline(cpu)) {
6083 /*
6084 * Reset core_pick so that we don't enter the fastpath when
6085 * coming online. core_pick would already be migrated to
6086 * another cpu during offline.
6087 */
6088 rq->core_pick = NULL;
6089 return __pick_next_task(rq, prev, rf);
6090 }
6091
6092 /*
6093 * If there were no {en,de}queues since we picked (IOW, the task
6094 * pointers are all still valid), and we haven't scheduled the last
6095 * pick yet, do so now.
6096 *
6097 * rq->core_pick can be NULL if no selection was made for a CPU because
6098 * it was either offline or went offline during a sibling's core-wide
6099 * selection. In this case, do a core-wide selection.
6100 */
6101 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6102 rq->core->core_pick_seq != rq->core_sched_seq &&
6103 rq->core_pick) {
6104 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6105
6106 next = rq->core_pick;
6107 if (next != prev) {
6108 put_prev_task(rq, prev);
6109 set_next_task(rq, next);
6110 }
6111
6112 rq->core_pick = NULL;
6113 goto out;
6114 }
6115
6116 put_prev_task_balance(rq, prev, rf);
6117
6118 smt_mask = cpu_smt_mask(cpu);
6119 need_sync = !!rq->core->core_cookie;
6120
6121 /* reset state */
6122 rq->core->core_cookie = 0UL;
6123 if (rq->core->core_forceidle_count) {
6124 if (!core_clock_updated) {
6125 update_rq_clock(rq->core);
6126 core_clock_updated = true;
6127 }
6128 sched_core_account_forceidle(rq);
6129 /* reset after accounting force idle */
6130 rq->core->core_forceidle_start = 0;
6131 rq->core->core_forceidle_count = 0;
6132 rq->core->core_forceidle_occupation = 0;
6133 need_sync = true;
6134 fi_before = true;
6135 }
6136
6137 /*
6138 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6139 *
6140 * @task_seq guards the task state ({en,de}queues)
6141 * @pick_seq is the @task_seq we did a selection on
6142 * @sched_seq is the @pick_seq we scheduled
6143 *
6144 * However, preemptions can cause multiple picks on the same task set.
6145 * 'Fix' this by also increasing @task_seq for every pick.
6146 */
6147 rq->core->core_task_seq++;
6148
6149 /*
6150 * Optimize for common case where this CPU has no cookies
6151 * and there are no cookied tasks running on siblings.
6152 */
6153 if (!need_sync) {
6154 next = pick_task(rq);
6155 if (!next->core_cookie) {
6156 rq->core_pick = NULL;
6157 /*
6158 * For robustness, update the min_vruntime_fi for
6159 * unconstrained picks as well.
6160 */
6161 WARN_ON_ONCE(fi_before);
6162 task_vruntime_update(rq, next, false);
6163 goto out_set_next;
6164 }
6165 }
6166
6167 /*
6168 * For each thread: do the regular task pick and find the max prio task
6169 * amongst them.
6170 *
6171 * Tie-break prio towards the current CPU
6172 */
6173 for_each_cpu_wrap(i, smt_mask, cpu) {
6174 rq_i = cpu_rq(i);
6175
6176 /*
6177 * Current cpu always has its clock updated on entrance to
6178 * pick_next_task(). If the current cpu is not the core,
6179 * the core may also have been updated above.
6180 */
6181 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6182 update_rq_clock(rq_i);
6183
6184 p = rq_i->core_pick = pick_task(rq_i);
6185 if (!max || prio_less(max, p, fi_before))
6186 max = p;
6187 }
6188
6189 cookie = rq->core->core_cookie = max->core_cookie;
6190
6191 /*
6192 * For each thread: try and find a runnable task that matches @max or
6193 * force idle.
6194 */
6195 for_each_cpu(i, smt_mask) {
6196 rq_i = cpu_rq(i);
6197 p = rq_i->core_pick;
6198
6199 if (!cookie_equals(p, cookie)) {
6200 p = NULL;
6201 if (cookie)
6202 p = sched_core_find(rq_i, cookie);
6203 if (!p)
6204 p = idle_sched_class.pick_task(rq_i);
6205 }
6206
6207 rq_i->core_pick = p;
6208
6209 if (p == rq_i->idle) {
6210 if (rq_i->nr_running) {
6211 rq->core->core_forceidle_count++;
6212 if (!fi_before)
6213 rq->core->core_forceidle_seq++;
6214 }
6215 } else {
6216 occ++;
6217 }
6218 }
6219
6220 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6221 rq->core->core_forceidle_start = rq_clock(rq->core);
6222 rq->core->core_forceidle_occupation = occ;
6223 }
6224
6225 rq->core->core_pick_seq = rq->core->core_task_seq;
6226 next = rq->core_pick;
6227 rq->core_sched_seq = rq->core->core_pick_seq;
6228
6229 /* Something should have been selected for current CPU */
6230 WARN_ON_ONCE(!next);
6231
6232 /*
6233 * Reschedule siblings
6234 *
6235 * NOTE: L1TF -- at this point we're no longer running the old task and
6236 * sending an IPI (below) ensures the sibling will no longer be running
6237 * their task. This ensures there is no inter-sibling overlap between
6238 * non-matching user state.
6239 */
6240 for_each_cpu(i, smt_mask) {
6241 rq_i = cpu_rq(i);
6242
6243 /*
6244 * An online sibling might have gone offline before a task
6245 * could be picked for it, or it might be offline but later
6246 * happen to come online, but it's too late and nothing was
6247 * picked for it. That's Ok - it will pick tasks for itself,
6248 * so ignore it.
6249 */
6250 if (!rq_i->core_pick)
6251 continue;
6252
6253 /*
6254 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6255 * fi_before fi update?
6256 * 0 0 1
6257 * 0 1 1
6258 * 1 0 1
6259 * 1 1 0
6260 */
6261 if (!(fi_before && rq->core->core_forceidle_count))
6262 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6263
6264 rq_i->core_pick->core_occupation = occ;
6265
6266 if (i == cpu) {
6267 rq_i->core_pick = NULL;
6268 continue;
6269 }
6270
6271 /* Did we break L1TF mitigation requirements? */
6272 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6273
6274 if (rq_i->curr == rq_i->core_pick) {
6275 rq_i->core_pick = NULL;
6276 continue;
6277 }
6278
6279 resched_curr(rq_i);
6280 }
6281
6282 out_set_next:
6283 set_next_task(rq, next);
6284 out:
6285 if (rq->core->core_forceidle_count && next == rq->idle)
6286 queue_core_balance(rq);
6287
6288 return next;
6289 }
6290
6291 static bool try_steal_cookie(int this, int that)
6292 {
6293 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6294 struct task_struct *p;
6295 unsigned long cookie;
6296 bool success = false;
6297
6298 guard(irq)();
6299 guard(double_rq_lock)(dst, src);
6300
6301 cookie = dst->core->core_cookie;
6302 if (!cookie)
6303 return false;
6304
6305 if (dst->curr != dst->idle)
6306 return false;
6307
6308 p = sched_core_find(src, cookie);
6309 if (!p)
6310 return false;
6311
6312 do {
6313 if (p == src->core_pick || p == src->curr)
6314 goto next;
6315
6316 if (!is_cpu_allowed(p, this))
6317 goto next;
6318
6319 if (p->core_occupation > dst->idle->core_occupation)
6320 goto next;
6321 /*
6322 * sched_core_find() and sched_core_next() will ensure
6323 * that task @p is not throttled now, we also need to
6324 * check whether the runqueue of the destination CPU is
6325 * being throttled.
6326 */
6327 if (sched_task_is_throttled(p, this))
6328 goto next;
6329
6330 deactivate_task(src, p, 0);
6331 set_task_cpu(p, this);
6332 activate_task(dst, p, 0);
6333
6334 resched_curr(dst);
6335
6336 success = true;
6337 break;
6338
6339 next:
6340 p = sched_core_next(p, cookie);
6341 } while (p);
6342
6343 return success;
6344 }
6345
6346 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6347 {
6348 int i;
6349
6350 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6351 if (i == cpu)
6352 continue;
6353
6354 if (need_resched())
6355 break;
6356
6357 if (try_steal_cookie(cpu, i))
6358 return true;
6359 }
6360
6361 return false;
6362 }
6363
6364 static void sched_core_balance(struct rq *rq)
6365 {
6366 struct sched_domain *sd;
6367 int cpu = cpu_of(rq);
6368
6369 preempt_disable();
6370 rcu_read_lock();
6371 raw_spin_rq_unlock_irq(rq);
6372 for_each_domain(cpu, sd) {
6373 if (need_resched())
6374 break;
6375
6376 if (steal_cookie_task(cpu, sd))
6377 break;
6378 }
6379 raw_spin_rq_lock_irq(rq);
6380 rcu_read_unlock();
6381 preempt_enable();
6382 }
6383
6384 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6385
6386 static void queue_core_balance(struct rq *rq)
6387 {
6388 if (!sched_core_enabled(rq))
6389 return;
6390
6391 if (!rq->core->core_cookie)
6392 return;
6393
6394 if (!rq->nr_running) /* not forced idle */
6395 return;
6396
6397 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6398 }
6399
6400 DEFINE_LOCK_GUARD_1(core_lock, int,
6401 sched_core_lock(*_T->lock, &_T->flags),
6402 sched_core_unlock(*_T->lock, &_T->flags),
6403 unsigned long flags)
6404
6405 static void sched_core_cpu_starting(unsigned int cpu)
6406 {
6407 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6408 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6409 int t;
6410
6411 guard(core_lock)(&cpu);
6412
6413 WARN_ON_ONCE(rq->core != rq);
6414
6415 /* if we're the first, we'll be our own leader */
6416 if (cpumask_weight(smt_mask) == 1)
6417 return;
6418
6419 /* find the leader */
6420 for_each_cpu(t, smt_mask) {
6421 if (t == cpu)
6422 continue;
6423 rq = cpu_rq(t);
6424 if (rq->core == rq) {
6425 core_rq = rq;
6426 break;
6427 }
6428 }
6429
6430 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6431 return;
6432
6433 /* install and validate core_rq */
6434 for_each_cpu(t, smt_mask) {
6435 rq = cpu_rq(t);
6436
6437 if (t == cpu)
6438 rq->core = core_rq;
6439
6440 WARN_ON_ONCE(rq->core != core_rq);
6441 }
6442 }
6443
6444 static void sched_core_cpu_deactivate(unsigned int cpu)
6445 {
6446 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6447 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6448 int t;
6449
6450 guard(core_lock)(&cpu);
6451
6452 /* if we're the last man standing, nothing to do */
6453 if (cpumask_weight(smt_mask) == 1) {
6454 WARN_ON_ONCE(rq->core != rq);
6455 return;
6456 }
6457
6458 /* if we're not the leader, nothing to do */
6459 if (rq->core != rq)
6460 return;
6461
6462 /* find a new leader */
6463 for_each_cpu(t, smt_mask) {
6464 if (t == cpu)
6465 continue;
6466 core_rq = cpu_rq(t);
6467 break;
6468 }
6469
6470 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6471 return;
6472
6473 /* copy the shared state to the new leader */
6474 core_rq->core_task_seq = rq->core_task_seq;
6475 core_rq->core_pick_seq = rq->core_pick_seq;
6476 core_rq->core_cookie = rq->core_cookie;
6477 core_rq->core_forceidle_count = rq->core_forceidle_count;
6478 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6479 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6480
6481 /*
6482 * Accounting edge for forced idle is handled in pick_next_task().
6483 * Don't need another one here, since the hotplug thread shouldn't
6484 * have a cookie.
6485 */
6486 core_rq->core_forceidle_start = 0;
6487
6488 /* install new leader */
6489 for_each_cpu(t, smt_mask) {
6490 rq = cpu_rq(t);
6491 rq->core = core_rq;
6492 }
6493 }
6494
6495 static inline void sched_core_cpu_dying(unsigned int cpu)
6496 {
6497 struct rq *rq = cpu_rq(cpu);
6498
6499 if (rq->core != rq)
6500 rq->core = rq;
6501 }
6502
6503 #else /* !CONFIG_SCHED_CORE */
6504
6505 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6506 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6507 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6508
6509 static struct task_struct *
6510 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6511 {
6512 return __pick_next_task(rq, prev, rf);
6513 }
6514
6515 #endif /* CONFIG_SCHED_CORE */
6516
6517 /*
6518 * Constants for the sched_mode argument of __schedule().
6519 *
6520 * The mode argument allows RT enabled kernels to differentiate a
6521 * preemption from blocking on an 'sleeping' spin/rwlock. Note that
6522 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
6523 * optimize the AND operation out and just check for zero.
6524 */
6525 #define SM_NONE 0x0
6526 #define SM_PREEMPT 0x1
6527 #define SM_RTLOCK_WAIT 0x2
6528
6529 #ifndef CONFIG_PREEMPT_RT
6530 # define SM_MASK_PREEMPT (~0U)
6531 #else
6532 # define SM_MASK_PREEMPT SM_PREEMPT
6533 #endif
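/*
 * Example of the optimization described above (illustrative): on !PREEMPT_RT,
 * SM_MASK_PREEMPT is ~0U, so a test such as
 *
 *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
 *
 * compiles down to a plain '!sched_mode' check; only PREEMPT_RT needs to
 * distinguish SM_PREEMPT from SM_RTLOCK_WAIT.
 */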
6534
6535 /*
6536 * __schedule() is the main scheduler function.
6537 *
6538 * The main means of driving the scheduler and thus entering this function are:
6539 *
6540 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6541 *
6542 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6543 * paths. For example, see arch/x86/entry_64.S.
6544 *
6545 * To drive preemption between tasks, the scheduler sets the flag in timer
6546 * interrupt handler scheduler_tick().
6547 *
6548 * 3. Wakeups don't really cause entry into schedule(). They add a
6549 * task to the run-queue and that's it.
6550 *
6551 * Now, if the new task added to the run-queue preempts the current
6552 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6553 * called on the nearest possible occasion:
6554 *
6555 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6556 *
6557 * - in syscall or exception context, at the next outermost
6558 * preempt_enable(). (this might be as soon as the wake_up()'s
6559 * spin_unlock()!)
6560 *
6561 * - in IRQ context, return from interrupt-handler to
6562 * preemptible context
6563 *
6564 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6565 * then at the next:
6566 *
6567 * - cond_resched() call
6568 * - explicit schedule() call
6569 * - return from syscall or exception to user-space
6570 * - return from interrupt-handler to user-space
6571 *
6572 * WARNING: must be called with preemption disabled!
6573 */
6574 static void __sched notrace __schedule(unsigned int sched_mode)
6575 {
6576 struct task_struct *prev, *next;
6577 unsigned long *switch_count;
6578 unsigned long prev_state;
6579 struct rq_flags rf;
6580 struct rq *rq;
6581 int cpu;
6582
6583 cpu = smp_processor_id();
6584 rq = cpu_rq(cpu);
6585 prev = rq->curr;
6586
6587 schedule_debug(prev, !!sched_mode);
6588
6589 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6590 hrtick_clear(rq);
6591
6592 local_irq_disable();
6593 rcu_note_context_switch(!!sched_mode);
6594
6595 /*
6596 * Make sure that signal_pending_state()->signal_pending() below
6597 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6598 * done by the caller to avoid the race with signal_wake_up():
6599 *
6600 * __set_current_state(@state) signal_wake_up()
6601 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6602 * wake_up_state(p, state)
6603 * LOCK rq->lock LOCK p->pi_state
6604 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6605 * if (signal_pending_state()) if (p->state & @state)
6606 *
6607 * Also, the membarrier system call requires a full memory barrier
6608 * after coming from user-space, before storing to rq->curr.
6609 */
6610 rq_lock(rq, &rf);
6611 smp_mb__after_spinlock();
6612
6613 /* Promote REQ to ACT */
6614 rq->clock_update_flags <<= 1;
6615 update_rq_clock(rq);
6616 rq->clock_update_flags = RQCF_UPDATED;
6617
6618 switch_count = &prev->nivcsw;
6619
6620 /*
6621 * We must load prev->state once (task_struct::state is volatile), such
6622 * that we form a control dependency vs deactivate_task() below.
6623 */
6624 prev_state = READ_ONCE(prev->__state);
6625 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
6626 if (signal_pending_state(prev_state, prev)) {
6627 WRITE_ONCE(prev->__state, TASK_RUNNING);
6628 } else {
6629 prev->sched_contributes_to_load =
6630 (prev_state & TASK_UNINTERRUPTIBLE) &&
6631 !(prev_state & TASK_NOLOAD) &&
6632 !(prev_state & TASK_FROZEN);
6633
6634 if (prev->sched_contributes_to_load)
6635 rq->nr_uninterruptible++;
6636
6637 /*
6638 * __schedule() ttwu()
6639 * prev_state = prev->state; if (p->on_rq && ...)
6640 * if (prev_state) goto out;
6641 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6642 * p->state = TASK_WAKING
6643 *
6644 * Where __schedule() and ttwu() have matching control dependencies.
6645 *
6646 * After this, schedule() must not care about p->state any more.
6647 */
6648 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
6649
6650 if (prev->in_iowait) {
6651 atomic_inc(&rq->nr_iowait);
6652 delayacct_blkio_start();
6653 }
6654 }
6655 switch_count = &prev->nvcsw;
6656 }
6657
6658 next = pick_next_task(rq, prev, &rf);
6659 clear_tsk_need_resched(prev);
6660 clear_preempt_need_resched();
6661 #ifdef CONFIG_SCHED_DEBUG
6662 rq->last_seen_need_resched_ns = 0;
6663 #endif
6664
6665 if (likely(prev != next)) {
6666 rq->nr_switches++;
6667 /*
6668 * RCU users of rcu_dereference(rq->curr) may not see
6669 * changes to task_struct made by pick_next_task().
6670 */
6671 RCU_INIT_POINTER(rq->curr, next);
6672 /*
6673 * The membarrier system call requires each architecture
6674 * to have a full memory barrier after updating
6675 * rq->curr, before returning to user-space.
6676 *
6677 * Here are the schemes providing that barrier on the
6678 * various architectures:
6679 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6680 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6681 * on PowerPC and on RISC-V.
6682 * - finish_lock_switch() for weakly-ordered
6683 * architectures where spin_unlock is a full barrier,
6684 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6685 * is a RELEASE barrier),
6686 */
6687 ++*switch_count;
6688
6689 migrate_disable_switch(rq, prev);
6690 psi_account_irqtime(rq, prev, next);
6691 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6692
6693 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
6694
6695 /* Also unlocks the rq: */
6696 rq = context_switch(rq, prev, next, &rf);
6697 } else {
6698 rq_unpin_lock(rq, &rf);
6699 __balance_callbacks(rq);
6700 raw_spin_rq_unlock_irq(rq);
6701 }
6702 }
6703
6704 void __noreturn do_task_dead(void)
6705 {
6706 /* Causes final put_task_struct in finish_task_switch(): */
6707 set_special_state(TASK_DEAD);
6708
6709 /* Tell freezer to ignore us: */
6710 current->flags |= PF_NOFREEZE;
6711
6712 __schedule(SM_NONE);
6713 BUG();
6714
6715 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6716 for (;;)
6717 cpu_relax();
6718 }
6719
6720 static inline void sched_submit_work(struct task_struct *tsk)
6721 {
6722 unsigned int task_flags;
6723
6724 if (task_is_running(tsk))
6725 return;
6726
6727 task_flags = tsk->flags;
6728 /*
6729 * If a worker goes to sleep, notify and ask workqueue whether it
6730 * wants to wake up a task to maintain concurrency.
6731 */
6732 if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6733 if (task_flags & PF_WQ_WORKER)
6734 wq_worker_sleeping(tsk);
6735 else
6736 io_wq_worker_sleeping(tsk);
6737 }
6738
6739 /*
6740 * spinlock and rwlock must not flush block requests. This will
6741 * deadlock if the callback attempts to acquire a lock which is
6742 * already acquired.
6743 */
6744 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6745
6746 /*
6747 * If we are going to sleep and we have plugged IO queued,
6748 * make sure to submit it to avoid deadlocks.
6749 */
6750 blk_flush_plug(tsk->plug, true);
6751 }
6752
6753 static void sched_update_worker(struct task_struct *tsk)
6754 {
6755 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6756 if (tsk->flags & PF_WQ_WORKER)
6757 wq_worker_running(tsk);
6758 else
6759 io_wq_worker_running(tsk);
6760 }
6761 }
6762
6763 asmlinkage __visible void __sched schedule(void)
6764 {
6765 struct task_struct *tsk = current;
6766
6767 sched_submit_work(tsk);
6768 do {
6769 preempt_disable();
6770 __schedule(SM_NONE);
6771 sched_preempt_enable_no_resched();
6772 } while (need_resched());
6773 sched_update_worker(tsk);
6774 }
6775 EXPORT_SYMBOL(schedule);
6776
6777 /*
6778 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6779 * state (have scheduled out non-voluntarily) by making sure that all
6780 * tasks have either left the run queue or have gone into user space.
6781 * As idle tasks do not do either, they must not ever be preempted
6782 * (schedule out non-voluntarily).
6783 *
6784 * schedule_idle() is similar to schedule_preempt_disable() except that it
6785 * never enables preemption because it does not call sched_submit_work().
6786 */
6787 void __sched schedule_idle(void)
6788 {
6789 /*
6790 * As this skips calling sched_submit_work(), which the idle task does
6791 * regardless because that function is a nop when the task is in a
6792 * TASK_RUNNING state, make sure this isn't used someplace that the
6793 * current task can be in any other state. Note, idle is always in the
6794 * TASK_RUNNING state.
6795 */
6796 WARN_ON_ONCE(current->__state);
6797 do {
6798 __schedule(SM_NONE);
6799 } while (need_resched());
6800 }
6801
6802 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6803 asmlinkage __visible void __sched schedule_user(void)
6804 {
6805 /*
6806 * If we come here after a random call to set_need_resched(),
6807 * or we have been woken up remotely but the IPI has not yet arrived,
6808 * we haven't yet exited the RCU idle mode. Do it here manually until
6809 * we find a better solution.
6810 *
6811 * NB: There are buggy callers of this function. Ideally we
6812 * should warn if prev_state != CONTEXT_USER, but that will trigger
6813 * too frequently to make sense yet.
6814 */
6815 enum ctx_state prev_state = exception_enter();
6816 schedule();
6817 exception_exit(prev_state);
6818 }
6819 #endif
6820
6821 /**
6822 * schedule_preempt_disabled - called with preemption disabled
6823 *
6824 * Returns with preemption disabled. Note: preempt_count must be 1
6825 */
6826 void __sched schedule_preempt_disabled(void)
6827 {
6828 sched_preempt_enable_no_resched();
6829 schedule();
6830 preempt_disable();
6831 }
6832
6833 #ifdef CONFIG_PREEMPT_RT
6834 void __sched notrace schedule_rtlock(void)
6835 {
6836 do {
6837 preempt_disable();
6838 __schedule(SM_RTLOCK_WAIT);
6839 sched_preempt_enable_no_resched();
6840 } while (need_resched());
6841 }
6842 NOKPROBE_SYMBOL(schedule_rtlock);
6843 #endif
6844
6845 static void __sched notrace preempt_schedule_common(void)
6846 {
6847 do {
6848 /*
6849 * Because the function tracer can trace preempt_count_sub()
6850 * and it also uses preempt_enable/disable_notrace(), if
6851 * NEED_RESCHED is set, the preempt_enable_notrace() called
6852 * by the function tracer will call this function again and
6853 * cause infinite recursion.
6854 *
6855 * Preemption must be disabled here before the function
6856 * tracer can trace. Break up preempt_disable() into two
6857 * calls. One to disable preemption without fear of being
6858 * traced. The other to still record the preemption latency,
6859 * which can also be traced by the function tracer.
6860 */
6861 preempt_disable_notrace();
6862 preempt_latency_start(1);
6863 __schedule(SM_PREEMPT);
6864 preempt_latency_stop(1);
6865 preempt_enable_no_resched_notrace();
6866
6867 /*
6868 * Check again in case we missed a preemption opportunity
6869 * between schedule and now.
6870 */
6871 } while (need_resched());
6872 }
6873
6874 #ifdef CONFIG_PREEMPTION
6875 /*
6876 * This is the entry point to schedule() from in-kernel preemption
6877 * off of preempt_enable.
6878 */
6879 asmlinkage __visible void __sched notrace preempt_schedule(void)
6880 {
6881 /*
6882 * If there is a non-zero preempt_count or interrupts are disabled,
6883 * we do not want to preempt the current task. Just return.
6884 */
6885 if (likely(!preemptible()))
6886 return;
6887 preempt_schedule_common();
6888 }
6889 NOKPROBE_SYMBOL(preempt_schedule);
6890 EXPORT_SYMBOL(preempt_schedule);
6891
6892 #ifdef CONFIG_PREEMPT_DYNAMIC
6893 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6894 #ifndef preempt_schedule_dynamic_enabled
6895 #define preempt_schedule_dynamic_enabled preempt_schedule
6896 #define preempt_schedule_dynamic_disabled NULL
6897 #endif
6898 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6899 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6900 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6901 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6902 void __sched notrace dynamic_preempt_schedule(void)
6903 {
6904 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6905 return;
6906 preempt_schedule();
6907 }
6908 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6909 EXPORT_SYMBOL(dynamic_preempt_schedule);
6910 #endif
6911 #endif
6912
6913 /**
6914 * preempt_schedule_notrace - preempt_schedule called by tracing
6915 *
6916 * The tracing infrastructure uses preempt_enable_notrace to prevent
6917 * recursion and tracing preempt enabling caused by the tracing
6918 * infrastructure itself. But as tracing can happen in areas coming
6919 * from userspace or just about to enter userspace, a preempt enable
6920 * can occur before user_exit() is called. This will cause the scheduler
6921 * to be called when the system is still in usermode.
6922 *
6923 * To prevent this, the preempt_enable_notrace will use this function
6924 * instead of preempt_schedule() to exit user context if needed before
6925 * calling the scheduler.
6926 */
6927 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6928 {
6929 enum ctx_state prev_ctx;
6930
6931 if (likely(!preemptible()))
6932 return;
6933
6934 do {
6935 /*
6936 * Because the function tracer can trace preempt_count_sub()
6937 * and it also uses preempt_enable/disable_notrace(), if
6938 * NEED_RESCHED is set, the preempt_enable_notrace() called
6939 * by the function tracer will call this function again and
6940 * cause infinite recursion.
6941 *
6942 * Preemption must be disabled here before the function
6943 * tracer can trace. Break up preempt_disable() into two
6944 * calls. One to disable preemption without fear of being
6945 * traced. The other to still record the preemption latency,
6946 * which can also be traced by the function tracer.
6947 */
6948 preempt_disable_notrace();
6949 preempt_latency_start(1);
6950 /*
6951 * Needs preempt disabled in case user_exit() is traced
6952 * and the tracer calls preempt_enable_notrace() causing
6953 * an infinite recursion.
6954 */
6955 prev_ctx = exception_enter();
6956 __schedule(SM_PREEMPT);
6957 exception_exit(prev_ctx);
6958
6959 preempt_latency_stop(1);
6960 preempt_enable_no_resched_notrace();
6961 } while (need_resched());
6962 }
6963 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6964
6965 #ifdef CONFIG_PREEMPT_DYNAMIC
6966 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6967 #ifndef preempt_schedule_notrace_dynamic_enabled
6968 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
6969 #define preempt_schedule_notrace_dynamic_disabled NULL
6970 #endif
6971 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6972 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6973 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6974 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6975 void __sched notrace dynamic_preempt_schedule_notrace(void)
6976 {
6977 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6978 return;
6979 preempt_schedule_notrace();
6980 }
6981 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6982 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6983 #endif
6984 #endif
6985
6986 #endif /* CONFIG_PREEMPTION */
6987
6988 /*
6989 * This is the entry point to schedule() from kernel preemption
6990 * off of irq context.
6991 * Note, that this is called and return with irqs disabled. This will
6992 * protect us against recursive calling from irq.
6993 */
6994 asmlinkage __visible void __sched preempt_schedule_irq(void)
6995 {
6996 enum ctx_state prev_state;
6997
6998 /* Catch callers which need to be fixed */
6999 BUG_ON(preempt_count() || !irqs_disabled());
7000
7001 prev_state = exception_enter();
7002
7003 do {
7004 preempt_disable();
7005 local_irq_enable();
7006 __schedule(SM_PREEMPT);
7007 local_irq_disable();
7008 sched_preempt_enable_no_resched();
7009 } while (need_resched());
7010
7011 exception_exit(prev_state);
7012 }
7013
7014 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7015 void *key)
7016 {
7017 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7018 return try_to_wake_up(curr->private, mode, wake_flags);
7019 }
7020 EXPORT_SYMBOL(default_wake_function);
7021
7022 static void __setscheduler_prio(struct task_struct *p, int prio)
7023 {
7024 if (dl_prio(prio))
7025 p->sched_class = &dl_sched_class;
7026 else if (rt_prio(prio))
7027 p->sched_class = &rt_sched_class;
7028 else
7029 p->sched_class = &fair_sched_class;
7030
7031 p->prio = prio;
7032 }
7033
7034 #ifdef CONFIG_RT_MUTEXES
7035
7036 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
7037 {
7038 if (pi_task)
7039 prio = min(prio, pi_task->prio);
7040
7041 return prio;
7042 }
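/*
 * Example (illustrative): a SCHED_NORMAL task at kernel priority 120 whose
 * top PI waiter is an RT task at kernel priority 50 gets an effective
 * priority of min(120, 50) == 50 here, i.e. it is boosted into the RT range
 * until the lock is released and it is de-boosted again.
 */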
7043
7044 static inline int rt_effective_prio(struct task_struct *p, int prio)
7045 {
7046 struct task_struct *pi_task = rt_mutex_get_top_task(p);
7047
7048 return __rt_effective_prio(pi_task, prio);
7049 }
7050
7051 /*
7052 * rt_mutex_setprio - set the current priority of a task
7053 * @p: task to boost
7054 * @pi_task: donor task
7055 *
7056 * This function changes the 'effective' priority of a task. It does
7057 * not touch ->normal_prio like __setscheduler().
7058 *
7059 * Used by the rt_mutex code to implement priority inheritance
7060 * logic. Call site only calls if the priority of the task changed.
7061 */
7062 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7063 {
7064 int prio, oldprio, queued, running, queue_flag =
7065 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7066 const struct sched_class *prev_class;
7067 struct rq_flags rf;
7068 struct rq *rq;
7069
7070 /* XXX used to be waiter->prio, not waiter->task->prio */
7071 prio = __rt_effective_prio(pi_task, p->normal_prio);
7072
7073 /*
7074 * If nothing changed; bail early.
7075 */
7076 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7077 return;
7078
7079 rq = __task_rq_lock(p, &rf);
7080 update_rq_clock(rq);
7081 /*
7082 * Set under pi_lock && rq->lock, such that the value can be used under
7083 * either lock.
7084 *
7085 * Note that there is a load of trickery involved in making this pointer
7086 * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7087 * ensure a task is de-boosted (pi_task is set to NULL) before the
7088 * task is allowed to run again (and can exit). This ensures the pointer
7089 * points to a blocked task -- which guarantees the task is present.
7090 */
7091 p->pi_top_task = pi_task;
7092
7093 /*
7094 * For FIFO/RR we only need to set prio, if that matches we're done.
7095 */
7096 if (prio == p->prio && !dl_prio(prio))
7097 goto out_unlock;
7098
7099 /*
7100 * Idle task boosting is a no-no in general. There is one
7101 * exception, when PREEMPT_RT and NOHZ is active:
7102 *
7103 * The idle task calls get_next_timer_interrupt() and holds
7104 * the timer wheel base->lock on the CPU and another CPU wants
7105 * to access the timer (probably to cancel it). We can safely
7106 * ignore the boosting request, as the idle CPU runs this code
7107 * with interrupts disabled and will complete the lock
7108 * protected section without being interrupted. So there is no
7109 * real need to boost.
7110 */
7111 if (unlikely(p == rq->idle)) {
7112 WARN_ON(p != rq->curr);
7113 WARN_ON(p->pi_blocked_on);
7114 goto out_unlock;
7115 }
7116
7117 trace_sched_pi_setprio(p, pi_task);
7118 oldprio = p->prio;
7119
7120 if (oldprio == prio)
7121 queue_flag &= ~DEQUEUE_MOVE;
7122
7123 prev_class = p->sched_class;
7124 queued = task_on_rq_queued(p);
7125 running = task_current(rq, p);
7126 if (queued)
7127 dequeue_task(rq, p, queue_flag);
7128 if (running)
7129 put_prev_task(rq, p);
7130
7131 /*
7132 * Boosting conditions are:
7133 * 1. -rt task is running and holds mutex A
7134 * --> -dl task blocks on mutex A
7135 *
7136 * 2. -dl task is running and holds mutex A
7137 * --> -dl task blocks on mutex A and could preempt the
7138 * running task
7139 */
7140 if (dl_prio(prio)) {
7141 if (!dl_prio(p->normal_prio) ||
7142 (pi_task && dl_prio(pi_task->prio) &&
7143 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7144 p->dl.pi_se = pi_task->dl.pi_se;
7145 queue_flag |= ENQUEUE_REPLENISH;
7146 } else {
7147 p->dl.pi_se = &p->dl;
7148 }
7149 } else if (rt_prio(prio)) {
7150 if (dl_prio(oldprio))
7151 p->dl.pi_se = &p->dl;
7152 if (oldprio < prio)
7153 queue_flag |= ENQUEUE_HEAD;
7154 } else {
7155 if (dl_prio(oldprio))
7156 p->dl.pi_se = &p->dl;
7157 if (rt_prio(oldprio))
7158 p->rt.timeout = 0;
7159 }
7160
7161 __setscheduler_prio(p, prio);
7162
7163 if (queued)
7164 enqueue_task(rq, p, queue_flag);
7165 if (running)
7166 set_next_task(rq, p);
7167
7168 check_class_changed(rq, p, prev_class, oldprio);
7169 out_unlock:
7170 /* Avoid rq from going away on us: */
7171 preempt_disable();
7172
7173 rq_unpin_lock(rq, &rf);
7174 __balance_callbacks(rq);
7175 raw_spin_rq_unlock(rq);
7176
7177 preempt_enable();
7178 }
7179 #else
7180 static inline int rt_effective_prio(struct task_struct *p, int prio)
7181 {
7182 return prio;
7183 }
7184 #endif
7185
7186 void set_user_nice(struct task_struct *p, long nice)
7187 {
7188 bool queued, running;
7189 int old_prio;
7190 struct rq_flags rf;
7191 struct rq *rq;
7192
7193 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
7194 return;
7195 /*
7196 * We have to be careful, if called from sys_setpriority(),
7197 * the task might be in the middle of scheduling on another CPU.
7198 */
7199 rq = task_rq_lock(p, &rf);
7200 update_rq_clock(rq);
7201
7202 /*
7203 * The RT priorities are set via sched_setscheduler(), but we still
7204 * allow the 'normal' nice value to be set - but as expected
7205 * it won't have any effect on scheduling as long as the task remains
7206 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
7207 */
7208 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
7209 p->static_prio = NICE_TO_PRIO(nice);
7210 goto out_unlock;
7211 }
7212 queued = task_on_rq_queued(p);
7213 running = task_current(rq, p);
7214 if (queued)
7215 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
7216 if (running)
7217 put_prev_task(rq, p);
7218
7219 p->static_prio = NICE_TO_PRIO(nice);
7220 set_load_weight(p, true);
7221 old_prio = p->prio;
7222 p->prio = effective_prio(p);
7223
7224 if (queued)
7225 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7226 if (running)
7227 set_next_task(rq, p);
7228
7229 /*
7230 * If the task increased its priority or is running and
7231 * lowered its priority, then reschedule its CPU:
7232 */
7233 p->sched_class->prio_changed(rq, p, old_prio);
7234
7235 out_unlock:
7236 task_rq_unlock(rq, p, &rf);
7237 }
7238 EXPORT_SYMBOL(set_user_nice);
7239
7240 /*
7241 * is_nice_reduction - check if nice value is an actual reduction
7242 *
7243 * Similar to can_nice() but does not perform a capability check.
7244 *
7245 * @p: task
7246 * @nice: nice value
7247 */
7248 static bool is_nice_reduction(const struct task_struct *p, const int nice)
7249 {
7250 /* Convert nice value [19,-20] to rlimit style value [1,40]: */
7251 int nice_rlim = nice_to_rlimit(nice);
7252
7253 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
7254 }
7255
7256 /*
7257 * can_nice - check if a task can reduce its nice value
7258 * @p: task
7259 * @nice: nice value
7260 */
7261 int can_nice(const struct task_struct *p, const int nice)
7262 {
7263 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
7264 }
7265
7266 #ifdef __ARCH_WANT_SYS_NICE
7267
7268 /*
7269 * sys_nice - change the priority of the current process.
7270 * @increment: priority increment
7271 *
7272 * sys_setpriority is a more generic, but much slower function that
7273 * does similar things.
7274 */
7275 SYSCALL_DEFINE1(nice, int, increment)
7276 {
7277 long nice, retval;
7278
7279 /*
7280 * Setpriority might change our priority at the same moment.
7281 * We don't have to worry. Conceptually one call occurs first
7282 * and we have a single winner.
7283 */
7284 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
7285 nice = task_nice(current) + increment;
7286
7287 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
7288 if (increment < 0 && !can_nice(current, nice))
7289 return -EPERM;
7290
7291 retval = security_task_setnice(current, nice);
7292 if (retval)
7293 return retval;
7294
7295 set_user_nice(current, nice);
7296 return 0;
7297 }
7298
7299 #endif
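/*
 * Illustrative userspace sketch (not part of this file): how a caller would
 * exercise the nice(2) path above. Since -1 is a legal nice value, errno must
 * be cleared before the call to distinguish it from an error return.
 *
 *	#include <unistd.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		errno = 0;
 *		int ret = nice(5);		// request a +5 nice adjustment
 *		if (ret == -1 && errno)		// -1 alone is a valid nice level
 *			perror("nice");
 *		else
 *			printf("new nice value: %d\n", ret);
 *		return 0;
 *	}
 */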
7300
7301 /**
7302 * task_prio - return the priority value of a given task.
7303 * @p: the task in question.
7304 *
7305 * Return: The priority value as seen by users in /proc.
7306 *
7307 * sched policy          return value   kernel prio    user prio/nice
7308 *
7309 * normal, batch, idle   [0 ... 39]     [100 ... 139]  0/[-20 ... 19]
7310 * fifo, rr              [-2 ... -100]  [98 ... 0]     [1 ... 99]
7311 * deadline              -101           -1             0
7312 */
7313 int task_prio(const struct task_struct *p)
7314 {
7315 return p->prio - MAX_RT_PRIO;
7316 }
7317
7318 /**
7319 * idle_cpu - is a given CPU idle currently?
7320 * @cpu: the processor in question.
7321 *
7322 * Return: 1 if the CPU is currently idle. 0 otherwise.
7323 */
7324 int idle_cpu(int cpu)
7325 {
7326 struct rq *rq = cpu_rq(cpu);
7327
7328 if (rq->curr != rq->idle)
7329 return 0;
7330
7331 if (rq->nr_running)
7332 return 0;
7333
7334 #ifdef CONFIG_SMP
7335 if (rq->ttwu_pending)
7336 return 0;
7337 #endif
7338
7339 return 1;
7340 }
7341
7342 /**
7343 * available_idle_cpu - is a given CPU idle for enqueuing work.
7344 * @cpu: the CPU in question.
7345 *
7346 * Return: 1 if the CPU is currently idle. 0 otherwise.
7347 */
7348 int available_idle_cpu(int cpu)
7349 {
7350 if (!idle_cpu(cpu))
7351 return 0;
7352
7353 if (vcpu_is_preempted(cpu))
7354 return 0;
7355
7356 return 1;
7357 }
7358
7359 /**
7360 * idle_task - return the idle task for a given CPU.
7361 * @cpu: the processor in question.
7362 *
7363 * Return: The idle task for the CPU @cpu.
7364 */
7365 struct task_struct *idle_task(int cpu)
7366 {
7367 return cpu_rq(cpu)->idle;
7368 }
7369
7370 #ifdef CONFIG_SCHED_CORE
7371 int sched_core_idle_cpu(int cpu)
7372 {
7373 struct rq *rq = cpu_rq(cpu);
7374
7375 if (sched_core_enabled(rq) && rq->curr == rq->idle)
7376 return 1;
7377
7378 return idle_cpu(cpu);
7379 }
7380
7381 #endif
7382
7383 #ifdef CONFIG_SMP
7384 /*
7385 * This function computes an effective utilization for the given CPU, to be
7386 * used for frequency selection given the linear relation: f = u * f_max.
7387 *
7388 * The scheduler tracks the following metrics:
7389 *
7390 * cpu_util_{cfs,rt,dl,irq}()
7391 * cpu_bw_dl()
7392 *
7393 * Where the cfs,rt and dl util numbers are tracked with the same metric and
7394 * synchronized windows and are thus directly comparable.
7395 *
7396 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
7397 * which excludes things like IRQ and steal-time. These latter are then accrued
7398 * in the irq utilization.
7399 *
7400 * The DL bandwidth number otoh is not a measured metric but a value computed
7401 * based on the task model parameters and gives the minimal utilization
7402 * required to meet deadlines.
7403 */
7404 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
7405 enum cpu_util_type type,
7406 struct task_struct *p)
7407 {
7408 unsigned long dl_util, util, irq, max;
7409 struct rq *rq = cpu_rq(cpu);
7410
7411 max = arch_scale_cpu_capacity(cpu);
7412
7413 if (!uclamp_is_used() &&
7414 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
7415 return max;
7416 }
7417
7418 /*
7419 * Early check to see if IRQ/steal time saturates the CPU; this can
7420 * happen because of inaccuracies in how we track these -- see
7421 * update_irq_load_avg().
7422 */
7423 irq = cpu_util_irq(rq);
7424 if (unlikely(irq >= max))
7425 return max;
7426
7427 /*
7428 * Because the time spent on RT/DL tasks is visible as 'lost' time to
7429 * CFS tasks and we use the same metric to track the effective
7430 * utilization (PELT windows are synchronized) we can directly add them
7431 * to obtain the CPU's actual utilization.
7432 *
7433 * CFS and RT utilization can be boosted or capped, depending on
7434 * utilization clamp constraints requested by currently RUNNABLE
7435 * tasks.
7436 * When there are no CFS RUNNABLE tasks, clamps are released and
7437 * frequency will be gracefully reduced with the utilization decay.
7438 */
7439 util = util_cfs + cpu_util_rt(rq);
7440 if (type == FREQUENCY_UTIL)
7441 util = uclamp_rq_util_with(rq, util, p);
7442
7443 dl_util = cpu_util_dl(rq);
7444
7445 /*
7446 * For frequency selection we do not make cpu_util_dl() a permanent part
7447 * of this sum because we want to use cpu_bw_dl() later on, but we need
7448 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
7449 * that we select f_max when there is no idle time.
7450 *
7451 * NOTE: numerical errors or stop class might cause us to not quite hit
7452 * saturation when we should -- something for later.
7453 */
7454 if (util + dl_util >= max)
7455 return max;
7456
7457 /*
7458 * OTOH, for energy computation we need the estimated running time, so
7459 * include util_dl and ignore dl_bw.
7460 */
7461 if (type == ENERGY_UTIL)
7462 util += dl_util;
7463
7464 /*
7465 * There is still idle time; further improve the number by using the
7466 * irq metric. Because IRQ/steal time is hidden from the task clock we
7467 * need to scale the task numbers:
7468 *
7469 *              max - irq
7470 * U' = irq + --------- * U
7471 *                 max
7472 */
7473 util = scale_irq_capacity(util, irq, max);
7474 util += irq;
7475
7476 /*
7477 * Bandwidth required by DEADLINE must always be granted while, for
7478 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
7479 * to gracefully reduce the frequency when no tasks show up for longer
7480 * periods of time.
7481 *
7482 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
7483 * bw_dl as requested freq. However, cpufreq is not yet ready for such
7484 * an interface. So, we only do the latter for now.
7485 */
7486 if (type == FREQUENCY_UTIL)
7487 util += cpu_bw_dl(rq);
7488
7489 return min(max, util);
7490 }
7491
7492 unsigned long sched_cpu_util(int cpu)
7493 {
7494 return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL);
7495 }
7496 #endif /* CONFIG_SMP */
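/*
 * Worked example of the IRQ scaling step above (illustrative only; the kernel
 * itself uses scale_irq_capacity() and the helper name here is made up):
 *
 *	unsigned long scale_irq_example(unsigned long util, unsigned long irq,
 *					unsigned long max)
 *	{
 *		return irq + (max - irq) * util / max;	// U' = irq + (max-irq)/max * U
 *	}
 *
 * With max = 1024, irq = 256 and a CFS+RT utilization of 512 this gives
 * 256 + 768 * 512 / 1024 == 640, i.e. the task-clock utilization is compressed
 * into the 768 units of capacity left over after IRQ/steal time.
 */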
7497
7498 /**
7499 * find_process_by_pid - find a process with a matching PID value.
7500 * @pid: the pid in question.
7501 *
7502 * The task of @pid, if found. %NULL otherwise.
7503 */
7504 static struct task_struct *find_process_by_pid(pid_t pid)
7505 {
7506 return pid ? find_task_by_vpid(pid) : current;
7507 }
7508
7509 /*
7510 * sched_setparam() passes in -1 for its policy, to let the functions
7511 * it calls know not to change it.
7512 */
7513 #define SETPARAM_POLICY -1
7514
7515 static void __setscheduler_params(struct task_struct *p,
7516 const struct sched_attr *attr)
7517 {
7518 int policy = attr->sched_policy;
7519
7520 if (policy == SETPARAM_POLICY)
7521 policy = p->policy;
7522
7523 p->policy = policy;
7524
7525 if (dl_policy(policy))
7526 __setparam_dl(p, attr);
7527 else if (fair_policy(policy))
7528 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
7529
7530 /*
7531 * __sched_setscheduler() ensures attr->sched_priority == 0 when
7532 * !rt_policy. Always setting this ensures that things like
7533 * getparam()/getattr() don't report silly values for !rt tasks.
7534 */
7535 p->rt_priority = attr->sched_priority;
7536 p->normal_prio = normal_prio(p);
7537 set_load_weight(p, true);
7538 }
7539
7540 /*
7541 * Check the target process has a UID that matches the current process's:
7542 */
7543 static bool check_same_owner(struct task_struct *p)
7544 {
7545 const struct cred *cred = current_cred(), *pcred;
7546 bool match;
7547
7548 rcu_read_lock();
7549 pcred = __task_cred(p);
7550 match = (uid_eq(cred->euid, pcred->euid) ||
7551 uid_eq(cred->euid, pcred->uid));
7552 rcu_read_unlock();
7553 return match;
7554 }
7555
7556 /*
7557 * Allow unprivileged RT tasks to decrease priority.
7558 * Only issue a capable test if needed and only once to avoid an audit
7559 * event on permitted non-privileged operations:
7560 */
7561 static int user_check_sched_setscheduler(struct task_struct *p,
7562 const struct sched_attr *attr,
7563 int policy, int reset_on_fork)
7564 {
7565 if (fair_policy(policy)) {
7566 if (attr->sched_nice < task_nice(p) &&
7567 !is_nice_reduction(p, attr->sched_nice))
7568 goto req_priv;
7569 }
7570
7571 if (rt_policy(policy)) {
7572 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
7573
7574 /* Can't set/change the rt policy: */
7575 if (policy != p->policy && !rlim_rtprio)
7576 goto req_priv;
7577
7578 /* Can't increase priority: */
7579 if (attr->sched_priority > p->rt_priority &&
7580 attr->sched_priority > rlim_rtprio)
7581 goto req_priv;
7582 }
7583
7584 /*
7585 * Can't set/change SCHED_DEADLINE policy at all for now
7586 * (safest behavior); in the future we would like to allow
7587 * unprivileged DL tasks to increase their relative deadline
7588 * or reduce their runtime (both ways reducing utilization)
7589 */
7590 if (dl_policy(policy))
7591 goto req_priv;
7592
7593 /*
7594 * Treat SCHED_IDLE as nice 20. Only allow a switch to
7595 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
7596 */
7597 if (task_has_idle_policy(p) && !idle_policy(policy)) {
7598 if (!is_nice_reduction(p, task_nice(p)))
7599 goto req_priv;
7600 }
7601
7602 /* Can't change other user's priorities: */
7603 if (!check_same_owner(p))
7604 goto req_priv;
7605
7606 /* Normal users shall not reset the sched_reset_on_fork flag: */
7607 if (p->sched_reset_on_fork && !reset_on_fork)
7608 goto req_priv;
7609
7610 return 0;
7611
7612 req_priv:
7613 if (!capable(CAP_SYS_NICE))
7614 return -EPERM;
7615
7616 return 0;
7617 }
7618
7619 static int __sched_setscheduler(struct task_struct *p,
7620 const struct sched_attr *attr,
7621 bool user, bool pi)
7622 {
7623 int oldpolicy = -1, policy = attr->sched_policy;
7624 int retval, oldprio, newprio, queued, running;
7625 const struct sched_class *prev_class;
7626 struct balance_callback *head;
7627 struct rq_flags rf;
7628 int reset_on_fork;
7629 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7630 struct rq *rq;
7631 bool cpuset_locked = false;
7632
7633 /* The pi code expects interrupts enabled */
7634 BUG_ON(pi && in_interrupt());
7635 recheck:
7636 /* Double check policy once rq lock held: */
7637 if (policy < 0) {
7638 reset_on_fork = p->sched_reset_on_fork;
7639 policy = oldpolicy = p->policy;
7640 } else {
7641 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
7642
7643 if (!valid_policy(policy))
7644 return -EINVAL;
7645 }
7646
7647 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7648 return -EINVAL;
7649
7650 /*
7651 * Valid priorities for SCHED_FIFO and SCHED_RR are
7652 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
7653 * SCHED_BATCH and SCHED_IDLE is 0.
7654 */
7655 if (attr->sched_priority > MAX_RT_PRIO-1)
7656 return -EINVAL;
7657 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
7658 (rt_policy(policy) != (attr->sched_priority != 0)))
7659 return -EINVAL;
7660
7661 if (user) {
7662 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
7663 if (retval)
7664 return retval;
7665
7666 if (attr->sched_flags & SCHED_FLAG_SUGOV)
7667 return -EINVAL;
7668
7669 retval = security_task_setscheduler(p);
7670 if (retval)
7671 return retval;
7672 }
7673
7674 /* Update task specific "requested" clamps */
7675 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
7676 retval = uclamp_validate(p, attr);
7677 if (retval)
7678 return retval;
7679 }
7680
7681 /*
7682 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
7683 * information.
7684 */
7685 if (dl_policy(policy) || dl_policy(p->policy)) {
7686 cpuset_locked = true;
7687 cpuset_lock();
7688 }
7689
7690 /*
7691 * Make sure no PI-waiters arrive (or leave) while we are
7692 * changing the priority of the task:
7693 *
7694 * To be able to change p->policy safely, the appropriate
7695 * runqueue lock must be held.
7696 */
7697 rq = task_rq_lock(p, &rf);
7698 update_rq_clock(rq);
7699
7700 /*
7701 * Changing the policy of the stop threads is a very bad idea:
7702 */
7703 if (p == rq->stop) {
7704 retval = -EINVAL;
7705 goto unlock;
7706 }
7707
7708 /*
7709 * If not changing anything there's no need to proceed further,
7710 * but store a possible modification of reset_on_fork.
7711 */
7712 if (unlikely(policy == p->policy)) {
7713 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
7714 goto change;
7715 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
7716 goto change;
7717 if (dl_policy(policy) && dl_param_changed(p, attr))
7718 goto change;
7719 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
7720 goto change;
7721
7722 p->sched_reset_on_fork = reset_on_fork;
7723 retval = 0;
7724 goto unlock;
7725 }
7726 change:
7727
7728 if (user) {
7729 #ifdef CONFIG_RT_GROUP_SCHED
7730 /*
7731 * Do not allow realtime tasks into groups that have no runtime
7732 * assigned.
7733 */
7734 if (rt_bandwidth_enabled() && rt_policy(policy) &&
7735 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
7736 !task_group_is_autogroup(task_group(p))) {
7737 retval = -EPERM;
7738 goto unlock;
7739 }
7740 #endif
7741 #ifdef CONFIG_SMP
7742 if (dl_bandwidth_enabled() && dl_policy(policy) &&
7743 !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
7744 cpumask_t *span = rq->rd->span;
7745
7746 /*
7747 * Don't allow tasks with an affinity mask smaller than
7748 * the entire root_domain to become SCHED_DEADLINE. We
7749 * will also fail if there's no bandwidth available.
7750 */
7751 if (!cpumask_subset(span, p->cpus_ptr) ||
7752 rq->rd->dl_bw.bw == 0) {
7753 retval = -EPERM;
7754 goto unlock;
7755 }
7756 }
7757 #endif
7758 }
7759
7760 /* Re-check policy now with rq lock held: */
7761 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
7762 policy = oldpolicy = -1;
7763 task_rq_unlock(rq, p, &rf);
7764 if (cpuset_locked)
7765 cpuset_unlock();
7766 goto recheck;
7767 }
7768
7769 /*
7770 * If setscheduling to SCHED_DEADLINE (or changing the parameters
7771 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
7772 * is available.
7773 */
7774 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
7775 retval = -EBUSY;
7776 goto unlock;
7777 }
7778
7779 p->sched_reset_on_fork = reset_on_fork;
7780 oldprio = p->prio;
7781
7782 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
7783 if (pi) {
7784 /*
7785 * Take priority boosted tasks into account. If the new
7786 * effective priority is unchanged, we just store the new
7787 * normal parameters and do not touch the scheduler class and
7788 * the runqueue. This will be done when the task deboosts
7789 * itself.
7790 */
7791 newprio = rt_effective_prio(p, newprio);
7792 if (newprio == oldprio)
7793 queue_flags &= ~DEQUEUE_MOVE;
7794 }
7795
7796 queued = task_on_rq_queued(p);
7797 running = task_current(rq, p);
7798 if (queued)
7799 dequeue_task(rq, p, queue_flags);
7800 if (running)
7801 put_prev_task(rq, p);
7802
7803 prev_class = p->sched_class;
7804
7805 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
7806 __setscheduler_params(p, attr);
7807 __setscheduler_prio(p, newprio);
7808 }
7809 __setscheduler_uclamp(p, attr);
7810
7811 if (queued) {
7812 /*
7813 * We enqueue to tail when the priority of a task is
7814 * increased (user space view).
7815 */
7816 if (oldprio < p->prio)
7817 queue_flags |= ENQUEUE_HEAD;
7818
7819 enqueue_task(rq, p, queue_flags);
7820 }
7821 if (running)
7822 set_next_task(rq, p);
7823
7824 check_class_changed(rq, p, prev_class, oldprio);
7825
7826 /* Avoid rq from going away on us: */
7827 preempt_disable();
7828 head = splice_balance_callbacks(rq);
7829 task_rq_unlock(rq, p, &rf);
7830
7831 if (pi) {
7832 if (cpuset_locked)
7833 cpuset_unlock();
7834 rt_mutex_adjust_pi(p);
7835 }
7836
7837 /* Run balance callbacks after we've adjusted the PI chain: */
7838 balance_callbacks(rq, head);
7839 preempt_enable();
7840
7841 return 0;
7842
7843 unlock:
7844 task_rq_unlock(rq, p, &rf);
7845 if (cpuset_locked)
7846 cpuset_unlock();
7847 return retval;
7848 }
7849
7850 static int _sched_setscheduler(struct task_struct *p, int policy,
7851 const struct sched_param *param, bool check)
7852 {
7853 struct sched_attr attr = {
7854 .sched_policy = policy,
7855 .sched_priority = param->sched_priority,
7856 .sched_nice = PRIO_TO_NICE(p->static_prio),
7857 };
7858
7859 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
7860 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7861 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
7862 policy &= ~SCHED_RESET_ON_FORK;
7863 attr.sched_policy = policy;
7864 }
7865
7866 return __sched_setscheduler(p, &attr, check, true);
7867 }
7868 /**
7869 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
7870 * @p: the task in question.
7871 * @policy: new policy.
7872 * @param: structure containing the new RT priority.
7873 *
7874 * Use sched_set_fifo(), read its comment.
7875 *
7876 * Return: 0 on success. An error code otherwise.
7877 *
7878 * NOTE that the task may already be dead.
7879 */
7880 int sched_setscheduler(struct task_struct *p, int policy,
7881 const struct sched_param *param)
7882 {
7883 return _sched_setscheduler(p, policy, param, true);
7884 }
7885
7886 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
7887 {
7888 return __sched_setscheduler(p, attr, true, true);
7889 }
7890
7891 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
7892 {
7893 return __sched_setscheduler(p, attr, false, true);
7894 }
7895 EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
7896
7897 /**
7898 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
7899 * @p: the task in question.
7900 * @policy: new policy.
7901 * @param: structure containing the new RT priority.
7902 *
7903 * Just like sched_setscheduler, only don't bother checking if the
7904 * current context has permission. For example, this is needed in
7905 * stop_machine(): we create temporary high priority worker threads,
7906 * but our caller might not have that capability.
7907 *
7908 * Return: 0 on success. An error code otherwise.
7909 */
7910 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
7911 const struct sched_param *param)
7912 {
7913 return _sched_setscheduler(p, policy, param, false);
7914 }
7915
7916 /*
7917 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
7918 * incapable of resource management, which is the one thing an OS really should
7919 * be doing.
7920 *
7921 * This is of course the reason it is limited to privileged users only.
7922 *
7923 * Worse still; it is fundamentally impossible to compose static priority
7924 * workloads. You cannot take two correctly working static prio workloads
7925 * and smash them together and still expect them to work.
7926 *
7927 * For this reason 'all' FIFO tasks the kernel creates are basically at:
7928 *
7929 * MAX_RT_PRIO / 2
7930 *
7931 * The administrator _MUST_ configure the system, the kernel simply doesn't
7932 * know enough information to make a sensible choice.
7933 */
7934 void sched_set_fifo(struct task_struct *p)
7935 {
7936 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
7937 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7938 }
7939 EXPORT_SYMBOL_GPL(sched_set_fifo);
7940
7941 /*
7942 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
7943 */
7944 void sched_set_fifo_low(struct task_struct *p)
7945 {
7946 struct sched_param sp = { .sched_priority = 1 };
7947 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7948 }
7949 EXPORT_SYMBOL_GPL(sched_set_fifo_low);
7950
7951 void sched_set_normal(struct task_struct *p, int nice)
7952 {
7953 struct sched_attr attr = {
7954 .sched_policy = SCHED_NORMAL,
7955 .sched_nice = nice,
7956 };
7957 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
7958 }
7959 EXPORT_SYMBOL_GPL(sched_set_normal);
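/*
 * Minimal in-kernel usage sketch for the helpers above (the thread function
 * and name are hypothetical): a driver that needs an RT worker creates a
 * kthread and takes the mid-range FIFO slot.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(my_irq_thread_fn, NULL, "my-irq-worker");
 *	if (!IS_ERR(tsk)) {
 *		sched_set_fifo(tsk);	// MAX_RT_PRIO / 2, see comment above
 *		wake_up_process(tsk);
 *	}
 *
 * The exact priority is then the administrator's problem, per the comment
 * preceding sched_set_fifo().
 */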
7960
7961 static int
7962 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
7963 {
7964 struct sched_param lparam;
7965 struct task_struct *p;
7966 int retval;
7967
7968 if (!param || pid < 0)
7969 return -EINVAL;
7970 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
7971 return -EFAULT;
7972
7973 rcu_read_lock();
7974 retval = -ESRCH;
7975 p = find_process_by_pid(pid);
7976 if (likely(p))
7977 get_task_struct(p);
7978 rcu_read_unlock();
7979
7980 if (likely(p)) {
7981 retval = sched_setscheduler(p, policy, &lparam);
7982 put_task_struct(p);
7983 }
7984
7985 return retval;
7986 }
7987
7988 /*
7989 * Mimics kernel/events/core.c perf_copy_attr().
7990 */
7991 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
7992 {
7993 u32 size;
7994 int ret;
7995
7996 /* Zero the full structure, so that a short copy will be nice: */
7997 memset(attr, 0, sizeof(*attr));
7998
7999 ret = get_user(size, &uattr->size);
8000 if (ret)
8001 return ret;
8002
8003 /* ABI compatibility quirk: */
8004 if (!size)
8005 size = SCHED_ATTR_SIZE_VER0;
8006 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
8007 goto err_size;
8008
8009 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
8010 if (ret) {
8011 if (ret == -E2BIG)
8012 goto err_size;
8013 return ret;
8014 }
8015
8016 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
8017 size < SCHED_ATTR_SIZE_VER1)
8018 return -EINVAL;
8019
8020 /*
8021 * XXX: Do we want to be lenient like existing syscalls; or do we want
8022 * to be strict and return an error on out-of-bounds values?
8023 */
8024 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
8025
8026 return 0;
8027
8028 err_size:
8029 put_user(sizeof(*attr), &uattr->size);
8030 return -E2BIG;
8031 }
8032
8033 static void get_params(struct task_struct *p, struct sched_attr *attr)
8034 {
8035 if (task_has_dl_policy(p))
8036 __getparam_dl(p, attr);
8037 else if (task_has_rt_policy(p))
8038 attr->sched_priority = p->rt_priority;
8039 else
8040 attr->sched_nice = task_nice(p);
8041 }
8042
8043 /**
8044 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
8045 * @pid: the pid in question.
8046 * @policy: new policy.
8047 * @param: structure containing the new RT priority.
8048 *
8049 * Return: 0 on success. An error code otherwise.
8050 */
8051 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
8052 {
8053 if (policy < 0)
8054 return -EINVAL;
8055
8056 return do_sched_setscheduler(pid, policy, param);
8057 }
8058
8059 /**
8060 * sys_sched_setparam - set/change the RT priority of a thread
8061 * @pid: the pid in question.
8062 * @param: structure containing the new RT priority.
8063 *
8064 * Return: 0 on success. An error code otherwise.
8065 */
8066 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
8067 {
8068 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
8069 }
8070
8071 /**
8072 * sys_sched_setattr - same as above, but with extended sched_attr
8073 * @pid: the pid in question.
8074 * @uattr: structure containing the extended parameters.
8075 * @flags: for future extension.
8076 */
8077 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
8078 unsigned int, flags)
8079 {
8080 struct sched_attr attr;
8081 struct task_struct *p;
8082 int retval;
8083
8084 if (!uattr || pid < 0 || flags)
8085 return -EINVAL;
8086
8087 retval = sched_copy_attr(uattr, &attr);
8088 if (retval)
8089 return retval;
8090
8091 if ((int)attr.sched_policy < 0)
8092 return -EINVAL;
8093 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
8094 attr.sched_policy = SETPARAM_POLICY;
8095
8096 rcu_read_lock();
8097 retval = -ESRCH;
8098 p = find_process_by_pid(pid);
8099 if (likely(p))
8100 get_task_struct(p);
8101 rcu_read_unlock();
8102
8103 if (likely(p)) {
8104 if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
8105 get_params(p, &attr);
8106 retval = sched_setattr(p, &attr);
8107 put_task_struct(p);
8108 }
8109
8110 return retval;
8111 }
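/*
 * Illustrative userspace sketch for sys_sched_setattr (there is no glibc
 * wrapper, so the raw syscall is used; the local struct below is a hand-rolled
 * mirror of the VER0 sched_attr layout and the helper name is made up):
 *
 *	#include <linux/sched.h>	// SCHED_DEADLINE
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	struct my_sched_attr {
 *		uint32_t size;
 *		uint32_t sched_policy;
 *		uint64_t sched_flags;
 *		int32_t  sched_nice;
 *		uint32_t sched_priority;
 *		uint64_t sched_runtime;
 *		uint64_t sched_deadline;
 *		uint64_t sched_period;
 *	};
 *
 *	int set_deadline_self(void)
 *	{
 *		struct my_sched_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size           = sizeof(attr);
 *		attr.sched_policy   = SCHED_DEADLINE;
 *		attr.sched_runtime  =  10 * 1000 * 1000;	// 10ms of runtime
 *		attr.sched_deadline = 100 * 1000 * 1000;	// within a 100ms deadline
 *		attr.sched_period   = 100 * 1000 * 1000;	// every 100ms
 *
 *		return syscall(SYS_sched_setattr, 0, &attr, 0);
 *	}
 */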
8112
8113 /**
8114 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
8115 * @pid: the pid in question.
8116 *
8117 * Return: On success, the policy of the thread. Otherwise, a negative error
8118 * code.
8119 */
8120 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
8121 {
8122 struct task_struct *p;
8123 int retval;
8124
8125 if (pid < 0)
8126 return -EINVAL;
8127
8128 retval = -ESRCH;
8129 rcu_read_lock();
8130 p = find_process_by_pid(pid);
8131 if (p) {
8132 retval = security_task_getscheduler(p);
8133 if (!retval)
8134 retval = p->policy
8135 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
8136 }
8137 rcu_read_unlock();
8138 return retval;
8139 }
8140
8141 /**
8142 * sys_sched_getparam - get the RT priority of a thread
8143 * @pid: the pid in question.
8144 * @param: structure containing the RT priority.
8145 *
8146 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
8147 * code.
8148 */
8149 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
8150 {
8151 struct sched_param lp = { .sched_priority = 0 };
8152 struct task_struct *p;
8153 int retval;
8154
8155 if (!param || pid < 0)
8156 return -EINVAL;
8157
8158 rcu_read_lock();
8159 p = find_process_by_pid(pid);
8160 retval = -ESRCH;
8161 if (!p)
8162 goto out_unlock;
8163
8164 retval = security_task_getscheduler(p);
8165 if (retval)
8166 goto out_unlock;
8167
8168 if (task_has_rt_policy(p))
8169 lp.sched_priority = p->rt_priority;
8170 rcu_read_unlock();
8171
8172 /*
8173 * This one might sleep; we cannot do it with a spinlock held ...
8174 */
8175 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
8176
8177 return retval;
8178
8179 out_unlock:
8180 rcu_read_unlock();
8181 return retval;
8182 }
8183
8184 /*
8185 * Copy the kernel size attribute structure (which might be larger
8186 * than what user-space knows about) to user-space.
8187 *
8188 * Note that all cases are valid: user-space buffer can be larger or
8189 * smaller than the kernel-space buffer. The usual case is that both
8190 * have the same size.
8191 */
8192 static int
8193 sched_attr_copy_to_user(struct sched_attr __user *uattr,
8194 struct sched_attr *kattr,
8195 unsigned int usize)
8196 {
8197 unsigned int ksize = sizeof(*kattr);
8198
8199 if (!access_ok(uattr, usize))
8200 return -EFAULT;
8201
8202 /*
8203 * sched_getattr() ABI forwards and backwards compatibility:
8204 *
8205 * If usize == ksize then we just copy everything to user-space and all is good.
8206 *
8207 * If usize < ksize then we only copy as much as user-space has space for,
8208 * this keeps ABI compatibility as well. We skip the rest.
8209 *
8210 * If usize > ksize then user-space is using a newer version of the ABI,
8211 * parts of which the kernel doesn't know about. Just ignore it - tooling can
8212 * detect the kernel's knowledge of attributes from the attr->size value
8213 * which is set to ksize in this case.
8214 */
8215 kattr->size = min(usize, ksize);
8216
8217 if (copy_to_user(uattr, kattr, kattr->size))
8218 return -EFAULT;
8219
8220 return 0;
8221 }
8222
8223 /**
8224 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
8225 * @pid: the pid in question.
8226 * @uattr: structure containing the extended parameters.
8227 * @usize: sizeof(attr) for fwd/bwd comp.
8228 * @flags: for future extension.
8229 */
8230 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
8231 unsigned int, usize, unsigned int, flags)
8232 {
8233 struct sched_attr kattr = { };
8234 struct task_struct *p;
8235 int retval;
8236
8237 if (!uattr || pid < 0 || usize > PAGE_SIZE ||
8238 usize < SCHED_ATTR_SIZE_VER0 || flags)
8239 return -EINVAL;
8240
8241 rcu_read_lock();
8242 p = find_process_by_pid(pid);
8243 retval = -ESRCH;
8244 if (!p)
8245 goto out_unlock;
8246
8247 retval = security_task_getscheduler(p);
8248 if (retval)
8249 goto out_unlock;
8250
8251 kattr.sched_policy = p->policy;
8252 if (p->sched_reset_on_fork)
8253 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
8254 get_params(p, &kattr);
8255 kattr.sched_flags &= SCHED_FLAG_ALL;
8256
8257 #ifdef CONFIG_UCLAMP_TASK
8258 /*
8259 * This could race with another potential updater, but this is fine
8260 * because it'll correctly read the old or the new value. We don't need
8261 * to guarantee who wins the race as long as it doesn't return garbage.
8262 */
8263 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
8264 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
8265 #endif
8266
8267 rcu_read_unlock();
8268
8269 return sched_attr_copy_to_user(uattr, &kattr, usize);
8270
8271 out_unlock:
8272 rcu_read_unlock();
8273 return retval;
8274 }
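/*
 * Matching userspace read-back sketch (reusing the hypothetical my_sched_attr
 * layout from the sched_setattr example above); pid 0 means the calling
 * thread, and the explicit size argument is what drives the fwd/bwd ABI
 * handling in sched_attr_copy_to_user():
 *
 *	struct my_sched_attr attr = { 0 };
 *
 *	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
 *		printf("policy=%u nice=%d rt_prio=%u\n",
 *		       attr.sched_policy, attr.sched_nice, attr.sched_priority);
 */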
8275
8276 #ifdef CONFIG_SMP
8277 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
8278 {
8279 int ret = 0;
8280
8281 /*
8282 * If the task isn't a deadline task or admission control is
8283 * disabled then we don't care about affinity changes.
8284 */
8285 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
8286 return 0;
8287
8288 /*
8289 * Since bandwidth control happens on root_domain basis,
8290 * if admission test is enabled, we only admit -deadline
8291 * tasks allowed to run on all the CPUs in the task's
8292 * root_domain.
8293 */
8294 rcu_read_lock();
8295 if (!cpumask_subset(task_rq(p)->rd->span, mask))
8296 ret = -EBUSY;
8297 rcu_read_unlock();
8298 return ret;
8299 }
8300 #endif
8301
8302 static int
8303 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
8304 {
8305 int retval;
8306 cpumask_var_t cpus_allowed, new_mask;
8307
8308 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
8309 return -ENOMEM;
8310
8311 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
8312 retval = -ENOMEM;
8313 goto out_free_cpus_allowed;
8314 }
8315
8316 cpuset_cpus_allowed(p, cpus_allowed);
8317 cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
8318
8319 ctx->new_mask = new_mask;
8320 ctx->flags |= SCA_CHECK;
8321
8322 retval = dl_task_check_affinity(p, new_mask);
8323 if (retval)
8324 goto out_free_new_mask;
8325
8326 retval = __set_cpus_allowed_ptr(p, ctx);
8327 if (retval)
8328 goto out_free_new_mask;
8329
8330 cpuset_cpus_allowed(p, cpus_allowed);
8331 if (!cpumask_subset(new_mask, cpus_allowed)) {
8332 /*
8333 * We must have raced with a concurrent cpuset update.
8334 * Just reset the cpumask to the cpuset's cpus_allowed.
8335 */
8336 cpumask_copy(new_mask, cpus_allowed);
8337
8338 /*
8339 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
8340 * will restore the previous user_cpus_ptr value.
8341 *
8342 * In the unlikely event a previous user_cpus_ptr exists,
8343 * we need to further restrict the mask to what is allowed
8344 * by that old user_cpus_ptr.
8345 */
8346 if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
8347 bool empty = !cpumask_and(new_mask, new_mask,
8348 ctx->user_mask);
8349
8350 if (WARN_ON_ONCE(empty))
8351 cpumask_copy(new_mask, cpus_allowed);
8352 }
8353 __set_cpus_allowed_ptr(p, ctx);
8354 retval = -EINVAL;
8355 }
8356
8357 out_free_new_mask:
8358 free_cpumask_var(new_mask);
8359 out_free_cpus_allowed:
8360 free_cpumask_var(cpus_allowed);
8361 return retval;
8362 }
8363
8364 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
8365 {
8366 struct affinity_context ac;
8367 struct cpumask *user_mask;
8368 struct task_struct *p;
8369 int retval;
8370
8371 rcu_read_lock();
8372
8373 p = find_process_by_pid(pid);
8374 if (!p) {
8375 rcu_read_unlock();
8376 return -ESRCH;
8377 }
8378
8379 /* Prevent p going away */
8380 get_task_struct(p);
8381 rcu_read_unlock();
8382
8383 if (p->flags & PF_NO_SETAFFINITY) {
8384 retval = -EINVAL;
8385 goto out_put_task;
8386 }
8387
8388 if (!check_same_owner(p)) {
8389 rcu_read_lock();
8390 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
8391 rcu_read_unlock();
8392 retval = -EPERM;
8393 goto out_put_task;
8394 }
8395 rcu_read_unlock();
8396 }
8397
8398 retval = security_task_setscheduler(p);
8399 if (retval)
8400 goto out_put_task;
8401
8402 /*
8403 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
8404 * alloc_user_cpus_ptr() returns NULL.
8405 */
8406 user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
8407 if (user_mask) {
8408 cpumask_copy(user_mask, in_mask);
8409 } else if (IS_ENABLED(CONFIG_SMP)) {
8410 retval = -ENOMEM;
8411 goto out_put_task;
8412 }
8413
8414 ac = (struct affinity_context){
8415 .new_mask = in_mask,
8416 .user_mask = user_mask,
8417 .flags = SCA_USER,
8418 };
8419
8420 retval = __sched_setaffinity(p, &ac);
8421 kfree(ac.user_mask);
8422
8423 out_put_task:
8424 put_task_struct(p);
8425 return retval;
8426 }
8427
8428 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
8429 struct cpumask *new_mask)
8430 {
8431 if (len < cpumask_size())
8432 cpumask_clear(new_mask);
8433 else if (len > cpumask_size())
8434 len = cpumask_size();
8435
8436 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
8437 }
8438
8439 /**
8440 * sys_sched_setaffinity - set the CPU affinity of a process
8441 * @pid: pid of the process
8442 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
8443 * @user_mask_ptr: user-space pointer to the new CPU mask
8444 *
8445 * Return: 0 on success. An error code otherwise.
8446 */
8447 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
8448 unsigned long __user *, user_mask_ptr)
8449 {
8450 cpumask_var_t new_mask;
8451 int retval;
8452
8453 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
8454 return -ENOMEM;
8455
8456 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
8457 if (retval == 0)
8458 retval = sched_setaffinity(pid, new_mask);
8459 free_cpumask_var(new_mask);
8460 return retval;
8461 }
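/*
 * Illustrative userspace sketch for the affinity syscalls (glibc does provide
 * wrappers here): pin the calling thread to CPU 2 and read the effective mask
 * back.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) != 0)
 *		perror("sched_setaffinity");
 *
 *	if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *		printf("allowed on %d CPUs\n", CPU_COUNT(&set));
 */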
8462
8463 long sched_getaffinity(pid_t pid, struct cpumask *mask)
8464 {
8465 struct task_struct *p;
8466 unsigned long flags;
8467 int retval;
8468
8469 rcu_read_lock();
8470
8471 retval = -ESRCH;
8472 p = find_process_by_pid(pid);
8473 if (!p)
8474 goto out_unlock;
8475
8476 retval = security_task_getscheduler(p);
8477 if (retval)
8478 goto out_unlock;
8479
8480 raw_spin_lock_irqsave(&p->pi_lock, flags);
8481 cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
8482 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
8483
8484 out_unlock:
8485 rcu_read_unlock();
8486
8487 return retval;
8488 }
8489
8490 /**
8491 * sys_sched_getaffinity - get the CPU affinity of a process
8492 * @pid: pid of the process
8493 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
8494 * @user_mask_ptr: user-space pointer to hold the current CPU mask
8495 *
8496 * Return: size of CPU mask copied to user_mask_ptr on success. An
8497 * error code otherwise.
8498 */
8499 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
8500 unsigned long __user *, user_mask_ptr)
8501 {
8502 int ret;
8503 cpumask_var_t mask;
8504
8505 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
8506 return -EINVAL;
8507 if (len & (sizeof(unsigned long)-1))
8508 return -EINVAL;
8509
8510 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
8511 return -ENOMEM;
8512
8513 ret = sched_getaffinity(pid, mask);
8514 if (ret == 0) {
8515 unsigned int retlen = min(len, cpumask_size());
8516
8517 if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
8518 ret = -EFAULT;
8519 else
8520 ret = retlen;
8521 }
8522 free_cpumask_var(mask);
8523
8524 return ret;
8525 }
8526
8527 static void do_sched_yield(void)
8528 {
8529 struct rq_flags rf;
8530 struct rq *rq;
8531
8532 rq = this_rq_lock_irq(&rf);
8533
8534 schedstat_inc(rq->yld_count);
8535 current->sched_class->yield_task(rq);
8536
8537 preempt_disable();
8538 rq_unlock_irq(rq, &rf);
8539 sched_preempt_enable_no_resched();
8540
8541 schedule();
8542 }
8543
8544 /**
8545 * sys_sched_yield - yield the current processor to other threads.
8546 *
8547 * This function yields the current CPU to other tasks. If there are no
8548 * other threads running on this CPU then this function will return.
8549 *
8550 * Return: 0.
8551 */
8552 SYSCALL_DEFINE0(sched_yield)
8553 {
8554 do_sched_yield();
8555 return 0;
8556 }
8557
8558 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
8559 int __sched __cond_resched(void)
8560 {
8561 if (should_resched(0)) {
8562 preempt_schedule_common();
8563 return 1;
8564 }
8565 /*
8566 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
8567 * whether the current CPU is in an RCU read-side critical section,
8568 * so the tick can report quiescent states even for CPUs looping
8569 * in kernel context. In contrast, in non-preemptible kernels,
8570 * RCU readers leave no in-memory hints, which means that CPU-bound
8571 * processes executing in kernel context might never report an
8572 * RCU quiescent state. Therefore, the following code causes
8573 * cond_resched() to report a quiescent state, but only when RCU
8574 * is in urgent need of one.
8575 */
8576 #ifndef CONFIG_PREEMPT_RCU
8577 rcu_all_qs();
8578 #endif
8579 return 0;
8580 }
8581 EXPORT_SYMBOL(__cond_resched);
8582 #endif
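/*
 * Typical in-kernel usage sketch for __cond_resched() via cond_resched()
 * (the item type and per-item helper are hypothetical): long loops in process
 * context drop the CPU voluntarily so non-preemptible kernels stay responsive.
 *
 *	static void walk_many_items(struct list_head *head)
 *	{
 *		struct my_item *it;
 *
 *		list_for_each_entry(it, head, node) {
 *			process_one(it);	// potentially thousands of items
 *			cond_resched();		// reschedule if should_resched()
 *		}
 *	}
 */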
8583
8584 #ifdef CONFIG_PREEMPT_DYNAMIC
8585 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8586 #define cond_resched_dynamic_enabled __cond_resched
8587 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
8588 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
8589 EXPORT_STATIC_CALL_TRAMP(cond_resched);
8590
8591 #define might_resched_dynamic_enabled __cond_resched
8592 #define might_resched_dynamic_disabled ((void *)&__static_call_return0)
8593 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
8594 EXPORT_STATIC_CALL_TRAMP(might_resched);
8595 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8596 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
8597 int __sched dynamic_cond_resched(void)
8598 {
8599 klp_sched_try_switch();
8600 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
8601 return 0;
8602 return __cond_resched();
8603 }
8604 EXPORT_SYMBOL(dynamic_cond_resched);
8605
8606 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
8607 int __sched dynamic_might_resched(void)
8608 {
8609 if (!static_branch_unlikely(&sk_dynamic_might_resched))
8610 return 0;
8611 return __cond_resched();
8612 }
8613 EXPORT_SYMBOL(dynamic_might_resched);
8614 #endif
8615 #endif
8616
8617 /*
8618 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
8619 * call schedule, and on return reacquire the lock.
8620 *
8621 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
8622 * operations here to prevent schedule() from being called twice (once via
8623 * spin_unlock(), once by hand).
8624 */
8625 int __cond_resched_lock(spinlock_t *lock)
8626 {
8627 int resched = should_resched(PREEMPT_LOCK_OFFSET);
8628 int ret = 0;
8629
8630 lockdep_assert_held(lock);
8631
8632 if (spin_needbreak(lock) || resched) {
8633 spin_unlock(lock);
8634 if (!_cond_resched())
8635 cpu_relax();
8636 ret = 1;
8637 spin_lock(lock);
8638 }
8639 return ret;
8640 }
8641 EXPORT_SYMBOL(__cond_resched_lock);
8642
8643 int __cond_resched_rwlock_read(rwlock_t *lock)
8644 {
8645 int resched = should_resched(PREEMPT_LOCK_OFFSET);
8646 int ret = 0;
8647
8648 lockdep_assert_held_read(lock);
8649
8650 if (rwlock_needbreak(lock) || resched) {
8651 read_unlock(lock);
8652 if (!_cond_resched())
8653 cpu_relax();
8654 ret = 1;
8655 read_lock(lock);
8656 }
8657 return ret;
8658 }
8659 EXPORT_SYMBOL(__cond_resched_rwlock_read);
8660
8661 int __cond_resched_rwlock_write(rwlock_t *lock)
8662 {
8663 int resched = should_resched(PREEMPT_LOCK_OFFSET);
8664 int ret = 0;
8665
8666 lockdep_assert_held_write(lock);
8667
8668 if (rwlock_needbreak(lock) || resched) {
8669 write_unlock(lock);
8670 if (!_cond_resched())
8671 cpu_relax();
8672 ret = 1;
8673 write_lock(lock);
8674 }
8675 return ret;
8676 }
8677 EXPORT_SYMBOL(__cond_resched_rwlock_write);
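/*
 * Usage sketch for the lock-breaking variants above (hypothetical list and
 * lock): cond_resched_lock() may drop and retake the lock, so the loop must
 * revalidate its state on every iteration.
 *
 *	spin_lock(&my_lock);
 *	while (!list_empty(&my_list)) {
 *		struct my_item *it;
 *
 *		it = list_first_entry(&my_list, struct my_item, node);
 *		list_del(&it->node);
 *		cond_resched_lock(&my_lock);	// my_list may change here
 *	}
 *	spin_unlock(&my_lock);
 */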
8678
8679 #ifdef CONFIG_PREEMPT_DYNAMIC
8680
8681 #ifdef CONFIG_GENERIC_ENTRY
8682 #include <linux/entry-common.h>
8683 #endif
8684
8685 /*
8686 * SC:cond_resched
8687 * SC:might_resched
8688 * SC:preempt_schedule
8689 * SC:preempt_schedule_notrace
8690 * SC:irqentry_exit_cond_resched
8691 *
8692 *
8693 * NONE:
8694 * cond_resched <- __cond_resched
8695 * might_resched <- RET0
8696 * preempt_schedule <- NOP
8697 * preempt_schedule_notrace <- NOP
8698 * irqentry_exit_cond_resched <- NOP
8699 *
8700 * VOLUNTARY:
8701 * cond_resched <- __cond_resched
8702 * might_resched <- __cond_resched
8703 * preempt_schedule <- NOP
8704 * preempt_schedule_notrace <- NOP
8705 * irqentry_exit_cond_resched <- NOP
8706 *
8707 * FULL:
8708 * cond_resched <- RET0
8709 * might_resched <- RET0
8710 * preempt_schedule <- preempt_schedule
8711 * preempt_schedule_notrace <- preempt_schedule_notrace
8712 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
8713 */
8714
8715 enum {
8716 preempt_dynamic_undefined = -1,
8717 preempt_dynamic_none,
8718 preempt_dynamic_voluntary,
8719 preempt_dynamic_full,
8720 };
8721
8722 int preempt_dynamic_mode = preempt_dynamic_undefined;
8723
8724 int sched_dynamic_mode(const char *str)
8725 {
8726 if (!strcmp(str, "none"))
8727 return preempt_dynamic_none;
8728
8729 if (!strcmp(str, "voluntary"))
8730 return preempt_dynamic_voluntary;
8731
8732 if (!strcmp(str, "full"))
8733 return preempt_dynamic_full;
8734
8735 return -EINVAL;
8736 }
8737
8738 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8739 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
8740 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
8741 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8742 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
8743 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
8744 #else
8745 #error "Unsupported PREEMPT_DYNAMIC mechanism"
8746 #endif
8747
8748 static DEFINE_MUTEX(sched_dynamic_mutex);
8749 static bool klp_override;
8750
8751 static void __sched_dynamic_update(int mode)
8752 {
8753 /*
8754 * Prevent {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
8755 * the ZERO state, which is invalid.
8756 */
8757 if (!klp_override)
8758 preempt_dynamic_enable(cond_resched);
8759 preempt_dynamic_enable(might_resched);
8760 preempt_dynamic_enable(preempt_schedule);
8761 preempt_dynamic_enable(preempt_schedule_notrace);
8762 preempt_dynamic_enable(irqentry_exit_cond_resched);
8763
8764 switch (mode) {
8765 case preempt_dynamic_none:
8766 if (!klp_override)
8767 preempt_dynamic_enable(cond_resched);
8768 preempt_dynamic_disable(might_resched);
8769 preempt_dynamic_disable(preempt_schedule);
8770 preempt_dynamic_disable(preempt_schedule_notrace);
8771 preempt_dynamic_disable(irqentry_exit_cond_resched);
8772 if (mode != preempt_dynamic_mode)
8773 pr_info("Dynamic Preempt: none\n");
8774 break;
8775
8776 case preempt_dynamic_voluntary:
8777 if (!klp_override)
8778 preempt_dynamic_enable(cond_resched);
8779 preempt_dynamic_enable(might_resched);
8780 preempt_dynamic_disable(preempt_schedule);
8781 preempt_dynamic_disable(preempt_schedule_notrace);
8782 preempt_dynamic_disable(irqentry_exit_cond_resched);
8783 if (mode != preempt_dynamic_mode)
8784 pr_info("Dynamic Preempt: voluntary\n");
8785 break;
8786
8787 case preempt_dynamic_full:
8788 if (!klp_override)
8789 preempt_dynamic_disable(cond_resched);
8790 preempt_dynamic_disable(might_resched);
8791 preempt_dynamic_enable(preempt_schedule);
8792 preempt_dynamic_enable(preempt_schedule_notrace);
8793 preempt_dynamic_enable(irqentry_exit_cond_resched);
8794 if (mode != preempt_dynamic_mode)
8795 pr_info("Dynamic Preempt: full\n");
8796 break;
8797 }
8798
8799 preempt_dynamic_mode = mode;
8800 }
8801
8802 void sched_dynamic_update(int mode)
8803 {
8804 mutex_lock(&sched_dynamic_mutex);
8805 __sched_dynamic_update(mode);
8806 mutex_unlock(&sched_dynamic_mutex);
8807 }
8808
8809 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
8810
8811 static int klp_cond_resched(void)
8812 {
8813 __klp_sched_try_switch();
8814 return __cond_resched();
8815 }
8816
8817 void sched_dynamic_klp_enable(void)
8818 {
8819 mutex_lock(&sched_dynamic_mutex);
8820
8821 klp_override = true;
8822 static_call_update(cond_resched, klp_cond_resched);
8823
8824 mutex_unlock(&sched_dynamic_mutex);
8825 }
8826
8827 void sched_dynamic_klp_disable(void)
8828 {
8829 mutex_lock(&sched_dynamic_mutex);
8830
8831 klp_override = false;
8832 __sched_dynamic_update(preempt_dynamic_mode);
8833
8834 mutex_unlock(&sched_dynamic_mutex);
8835 }
8836
8837 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
8838
8839 static int __init setup_preempt_mode(char *str)
8840 {
8841 int mode = sched_dynamic_mode(str);
8842 if (mode < 0) {
8843 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
8844 return 0;
8845 }
8846
8847 sched_dynamic_update(mode);
8848 return 1;
8849 }
8850 __setup("preempt=", setup_preempt_mode);
8851
8852 static void __init preempt_dynamic_init(void)
8853 {
8854 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
8855 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
8856 sched_dynamic_update(preempt_dynamic_none);
8857 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
8858 sched_dynamic_update(preempt_dynamic_voluntary);
8859 } else {
8860 /* Default static call setting, nothing to do */
8861 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
8862 preempt_dynamic_mode = preempt_dynamic_full;
8863 pr_info("Dynamic Preempt: full\n");
8864 }
8865 }
8866 }
8867
8868 #define PREEMPT_MODEL_ACCESSOR(mode) \
8869 bool preempt_model_##mode(void) \
8870 { \
8871 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
8872 return preempt_dynamic_mode == preempt_dynamic_##mode; \
8873 } \
8874 EXPORT_SYMBOL_GPL(preempt_model_##mode)
8875
8876 PREEMPT_MODEL_ACCESSOR(none);
8877 PREEMPT_MODEL_ACCESSOR(voluntary);
8878 PREEMPT_MODEL_ACCESSOR(full);
8879
8880 #else /* !CONFIG_PREEMPT_DYNAMIC */
8881
8882 static inline void preempt_dynamic_init(void) { }
8883
8884 #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
8885
8886 /**
8887 * yield - yield the current processor to other threads.
8888 *
8889 * Do not ever use this function, there's a 99% chance you're doing it wrong.
8890 *
8891 * The scheduler is at all times free to pick the calling task as the most
8892 * eligible task to run, if removing the yield() call from your code breaks
8893 * it, it's already broken.
8894 *
8895 * Typical broken usage is:
8896 *
8897 * while (!event)
8898 * yield();
8899 *
8900 * where one assumes that yield() will let 'the other' process run that will
8901 * make event true. If the current task is a SCHED_FIFO task that will never
8902 * happen. Never use yield() as a progress guarantee!!
8903 *
8904 * If you want to use yield() to wait for something, use wait_event().
8905 * If you want to use yield() to be 'nice' for others, use cond_resched().
8906 * If you still want to use yield(), do not!
8907 */
8908 void __sched yield(void)
8909 {
8910 set_current_state(TASK_RUNNING);
8911 do_sched_yield();
8912 }
8913 EXPORT_SYMBOL(yield);
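/*
 * Concrete form of the advice above (the wait queue and flag are
 * hypothetical):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool done;
 *
 *	// Broken: may spin forever under SCHED_FIFO
 *	while (!done)
 *		yield();
 *
 *	// Preferred: sleep until the condition becomes true
 *	wait_event(my_wq, done);
 *
 *	// ...and on the producer side:
 *	done = true;
 *	wake_up(&my_wq);
 */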
8914
8915 /**
8916 * yield_to - yield the current processor to another thread in
8917 * your thread group, or accelerate that thread toward the
8918 * processor it's on.
8919 * @p: target task
8920 * @preempt: whether task preemption is allowed or not
8921 *
8922 * It's the caller's job to ensure that the target task struct
8923 * can't go away on us before we can do any checks.
8924 *
8925 * Return:
8926 * true (>0) if we indeed boosted the target task.
8927 * false (0) if we failed to boost the target.
8928 * -ESRCH if there's no task to yield to.
8929 */
8930 int __sched yield_to(struct task_struct *p, bool preempt)
8931 {
8932 struct task_struct *curr = current;
8933 struct rq *rq, *p_rq;
8934 unsigned long flags;
8935 int yielded = 0;
8936
8937 local_irq_save(flags);
8938 rq = this_rq();
8939
8940 again:
8941 p_rq = task_rq(p);
8942 /*
8943 * If we're the only runnable task on the rq and target rq also
8944 * has only one task, there's absolutely no point in yielding.
8945 */
8946 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
8947 yielded = -ESRCH;
8948 goto out_irq;
8949 }
8950
8951 double_rq_lock(rq, p_rq);
8952 if (task_rq(p) != p_rq) {
8953 double_rq_unlock(rq, p_rq);
8954 goto again;
8955 }
8956
8957 if (!curr->sched_class->yield_to_task)
8958 goto out_unlock;
8959
8960 if (curr->sched_class != p->sched_class)
8961 goto out_unlock;
8962
8963 if (task_on_cpu(p_rq, p) || !task_is_running(p))
8964 goto out_unlock;
8965
8966 yielded = curr->sched_class->yield_to_task(rq, p);
8967 if (yielded) {
8968 schedstat_inc(rq->yld_count);
8969 /*
8970 * Make p's CPU reschedule; pick_next_entity takes care of
8971 * fairness.
8972 */
8973 if (preempt && rq != p_rq)
8974 resched_curr(p_rq);
8975 }
8976
8977 out_unlock:
8978 double_rq_unlock(rq, p_rq);
8979 out_irq:
8980 local_irq_restore(flags);
8981
8982 if (yielded > 0)
8983 schedule();
8984
8985 return yielded;
8986 }
8987 EXPORT_SYMBOL_GPL(yield_to);
8988
8989 int io_schedule_prepare(void)
8990 {
8991 int old_iowait = current->in_iowait;
8992
8993 current->in_iowait = 1;
8994 blk_flush_plug(current->plug, true);
8995 return old_iowait;
8996 }
8997
8998 void io_schedule_finish(int token)
8999 {
9000 current->in_iowait = token;
9001 }
9002
9003 /*
9004 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
9005 * that process accounting knows that this is a task in IO wait state.
9006 */
9007 long __sched io_schedule_timeout(long timeout)
9008 {
9009 int token;
9010 long ret;
9011
9012 token = io_schedule_prepare();
9013 ret = schedule_timeout(timeout);
9014 io_schedule_finish(token);
9015
9016 return ret;
9017 }
9018 EXPORT_SYMBOL(io_schedule_timeout);
9019
9020 void __sched io_schedule(void)
9021 {
9022 int token;
9023
9024 token = io_schedule_prepare();
9025 schedule();
9026 io_schedule_finish(token);
9027 }
9028 EXPORT_SYMBOL(io_schedule);
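/*
 * Usage sketch for the iowait helpers above (the completion is hypothetical):
 * any blocking wait can be bracketed so the sleep is accounted as IO wait,
 * which is the same pattern mutex_lock_io() uses.
 *
 *	int tok;
 *
 *	tok = io_schedule_prepare();
 *	wait_for_completion(&my_io_done);	// or any other blocking call
 *	io_schedule_finish(tok);
 */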
9029
9030 /**
9031 * sys_sched_get_priority_max - return maximum RT priority.
9032 * @policy: scheduling class.
9033 *
9034 * Return: On success, this syscall returns the maximum
9035 * rt_priority that can be used by a given scheduling class.
9036 * On failure, a negative error code is returned.
9037 */
9038 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
9039 {
9040 int ret = -EINVAL;
9041
9042 switch (policy) {
9043 case SCHED_FIFO:
9044 case SCHED_RR:
9045 ret = MAX_RT_PRIO-1;
9046 break;
9047 case SCHED_DEADLINE:
9048 case SCHED_NORMAL:
9049 case SCHED_BATCH:
9050 case SCHED_IDLE:
9051 ret = 0;
9052 break;
9053 }
9054 return ret;
9055 }
9056
9057 /**
9058 * sys_sched_get_priority_min - return minimum RT priority.
9059 * @policy: scheduling class.
9060 *
9061 * Return: On success, this syscall returns the minimum
9062 * rt_priority that can be used by a given scheduling class.
9063 * On failure, a negative error code is returned.
9064 */
9065 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
9066 {
9067 int ret = -EINVAL;
9068
9069 switch (policy) {
9070 case SCHED_FIFO:
9071 case SCHED_RR:
9072 ret = 1;
9073 break;
9074 case SCHED_DEADLINE:
9075 case SCHED_NORMAL:
9076 case SCHED_BATCH:
9077 case SCHED_IDLE:
9078 ret = 0;
9079 }
9080 return ret;
9081 }
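/*
 * Illustrative userspace sketch: the portable way to size RT priorities is to
 * query the range instead of hard-coding 1..99.
 *
 *	#include <sched.h>
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);	// 1 on Linux
 *	int hi = sched_get_priority_max(SCHED_FIFO);	// MAX_RT_PRIO-1 == 99
 *	struct sched_param sp = { .sched_priority = (lo + hi) / 2 };
 */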
9082
9083 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
9084 {
9085 struct task_struct *p;
9086 unsigned int time_slice;
9087 struct rq_flags rf;
9088 struct rq *rq;
9089 int retval;
9090
9091 if (pid < 0)
9092 return -EINVAL;
9093
9094 retval = -ESRCH;
9095 rcu_read_lock();
9096 p = find_process_by_pid(pid);
9097 if (!p)
9098 goto out_unlock;
9099
9100 retval = security_task_getscheduler(p);
9101 if (retval)
9102 goto out_unlock;
9103
9104 rq = task_rq_lock(p, &rf);
9105 time_slice = 0;
9106 if (p->sched_class->get_rr_interval)
9107 time_slice = p->sched_class->get_rr_interval(rq, p);
9108 task_rq_unlock(rq, p, &rf);
9109
9110 rcu_read_unlock();
9111 jiffies_to_timespec64(time_slice, t);
9112 return 0;
9113
9114 out_unlock:
9115 rcu_read_unlock();
9116 return retval;
9117 }
9118
9119 /**
9120 * sys_sched_rr_get_interval - return the default timeslice of a process.
9121 * @pid: pid of the process.
9122 * @interval: userspace pointer to the timeslice value.
9123 *
9124 * this syscall writes the default timeslice value of a given process
9125 * into the user-space timespec buffer. A value of '0' means infinity.
9126 *
9127 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
9128 * an error code.
9129 */
9130 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
9131 struct __kernel_timespec __user *, interval)
9132 {
9133 struct timespec64 t;
9134 int retval = sched_rr_get_interval(pid, &t);
9135
9136 if (retval == 0)
9137 retval = put_timespec64(&t, interval);
9138
9139 return retval;
9140 }
9141
9142 #ifdef CONFIG_COMPAT_32BIT_TIME
9143 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
9144 struct old_timespec32 __user *, interval)
9145 {
9146 struct timespec64 t;
9147 int retval = sched_rr_get_interval(pid, &t);
9148
9149 if (retval == 0)
9150 retval = put_old_timespec32(&t, interval);
9151 return retval;
9152 }
9153 #endif
9154
9155 void sched_show_task(struct task_struct *p)
9156 {
9157 unsigned long free = 0;
9158 int ppid;
9159
9160 if (!try_get_task_stack(p))
9161 return;
9162
9163 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
9164
9165 if (task_is_running(p))
9166 pr_cont(" running task ");
9167 #ifdef CONFIG_DEBUG_STACK_USAGE
9168 free = stack_not_used(p);
9169 #endif
9170 ppid = 0;
9171 rcu_read_lock();
9172 if (pid_alive(p))
9173 ppid = task_pid_nr(rcu_dereference(p->real_parent));
9174 rcu_read_unlock();
9175 pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
9176 free, task_pid_nr(p), ppid,
9177 read_task_thread_flags(p));
9178
9179 print_worker_info(KERN_INFO, p);
9180 print_stop_info(KERN_INFO, p);
9181 show_stack(p, NULL, KERN_INFO);
9182 put_task_stack(p);
9183 }
9184 EXPORT_SYMBOL_GPL(sched_show_task);
9185
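/*
 * Decide whether a task should be shown by show_state_filter(): an empty
 * filter matches every task, otherwise the task's state must intersect
 * the filter (with TASK_IDLE excluded when filtering for plain
 * TASK_UNINTERRUPTIBLE).
 */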
9186 static inline bool
9187 state_filter_match(unsigned long state_filter, struct task_struct *p)
9188 {
9189 unsigned int state = READ_ONCE(p->__state);
9190
9191 /* no filter, everything matches */
9192 if (!state_filter)
9193 return true;
9194
9195 /* filter, but doesn't match */
9196 if (!(state & state_filter))
9197 return false;
9198
9199 /*
9200 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
9201 * TASK_KILLABLE).
9202 */
9203 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
9204 return false;
9205
9206 return true;
9207 }
9208
9209
9210 void show_state_filter(unsigned int state_filter)
9211 {
9212 struct task_struct *g, *p;
9213
9214 rcu_read_lock();
9215 for_each_process_thread(g, p) {
9216 /*
9217 * reset the NMI-timeout, listing all files on a slow
9218 * console might take a lot of time:
9219 * Also, reset softlockup watchdogs on all CPUs, because
9220 * another CPU might be blocked waiting for us to process
9221 * an IPI.
9222 */
9223 touch_nmi_watchdog();
9224 touch_all_softlockup_watchdogs();
9225 if (state_filter_match(state_filter, p))
9226 sched_show_task(p);
9227 }
9228
9229 #ifdef CONFIG_SCHED_DEBUG
9230 if (!state_filter)
9231 sysrq_sched_debug_show();
9232 #endif
9233 rcu_read_unlock();
9234 /*
9235 * Only show locks if all tasks are dumped:
9236 */
9237 if (!state_filter)
9238 debug_show_all_locks();
9239 }
9240
9241 /**
9242 * init_idle - set up an idle thread for a given CPU
9243 * @idle: task in question
9244 * @cpu: CPU the idle task belongs to
9245 *
9246 * NOTE: this function does not set the idle thread's NEED_RESCHED
9247 * flag, to make booting more robust.
9248 */
9249 void __init init_idle(struct task_struct *idle, int cpu)
9250 {
9251 #ifdef CONFIG_SMP
9252 struct affinity_context ac = (struct affinity_context) {
9253 .new_mask = cpumask_of(cpu),
9254 .flags = 0,
9255 };
9256 #endif
9257 struct rq *rq = cpu_rq(cpu);
9258 unsigned long flags;
9259
9260 __sched_fork(0, idle);
9261
9262 raw_spin_lock_irqsave(&idle->pi_lock, flags);
9263 raw_spin_rq_lock(rq);
9264
9265 idle->__state = TASK_RUNNING;
9266 idle->se.exec_start = sched_clock();
9267 /*
9268 * PF_KTHREAD should already be set at this point; regardless, make it
9269 * look like a proper per-CPU kthread.
9270 */
9271 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
9272 kthread_set_per_cpu(idle, cpu);
9273
9274 #ifdef CONFIG_SMP
9275 /*
9276 * It's possible that init_idle() gets called multiple times on a task;
9277 * in that case do_set_cpus_allowed() will not do the right thing.
9278 *
9279 * And since this is boot we can forgo the serialization.
9280 */
9281 set_cpus_allowed_common(idle, &ac);
9282 #endif
9283 /*
9284 * We're having a chicken and egg problem, even though we are
9285 * holding rq->lock, the CPU isn't yet set to this CPU so the
9286 * lockdep check in task_group() will fail.
9287 *
9288 * Similar case to sched_fork(). / Alternatively we could
9289 * use task_rq_lock() here and obtain the other rq->lock.
9290 *
9291 * Silence PROVE_RCU
9292 */
9293 rcu_read_lock();
9294 __set_task_cpu(idle, cpu);
9295 rcu_read_unlock();
9296
9297 rq->idle = idle;
9298 rcu_assign_pointer(rq->curr, idle);
9299 idle->on_rq = TASK_ON_RQ_QUEUED;
9300 #ifdef CONFIG_SMP
9301 idle->on_cpu = 1;
9302 #endif
9303 raw_spin_rq_unlock(rq);
9304 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
9305
9306 /* Set the preempt count _outside_ the spinlocks! */
9307 init_idle_preempt_count(idle, cpu);
9308
9309 /*
9310 * The idle tasks have their own, simple scheduling class:
9311 */
9312 idle->sched_class = &idle_sched_class;
9313 ftrace_graph_init_idle_task(idle, cpu);
9314 vtime_init_idle(idle, cpu);
9315 #ifdef CONFIG_SMP
9316 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
9317 #endif
9318 }
9319
9320 #ifdef CONFIG_SMP
9321
9322 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
9323 const struct cpumask *trial)
9324 {
9325 int ret = 1;
9326
9327 if (cpumask_empty(cur))
9328 return ret;
9329
9330 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
9331
9332 return ret;
9333 }
9334
9335 int task_can_attach(struct task_struct *p)
9336 {
9337 int ret = 0;
9338
9339 /*
9340 * Kthreads which disallow setaffinity shouldn't be moved
9341 * to a new cpuset; we don't want to change their CPU
9342 * affinity and isolating such threads by their set of
9343 * allowed nodes is unnecessary. Thus, cpusets are not
9344 * applicable for such threads. This prevents checking for
9345 * success of set_cpus_allowed_ptr() on all attached tasks
9346 * before cpus_mask may be changed.
9347 */
9348 if (p->flags & PF_NO_SETAFFINITY)
9349 ret = -EINVAL;
9350
9351 return ret;
9352 }
9353
9354 bool sched_smp_initialized __read_mostly;
9355
9356 #ifdef CONFIG_NUMA_BALANCING
9357 /* Migrate current task p to target_cpu */
9358 int migrate_task_to(struct task_struct *p, int target_cpu)
9359 {
9360 struct migration_arg arg = { p, target_cpu };
9361 int curr_cpu = task_cpu(p);
9362
9363 if (curr_cpu == target_cpu)
9364 return 0;
9365
9366 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
9367 return -EINVAL;
9368
9369 /* TODO: This is not properly updating schedstats */
9370
9371 trace_sched_move_numa(p, curr_cpu, target_cpu);
9372 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
9373 }
9374
9375 /*
9376 * Requeue a task on a given node and accurately track the number of NUMA
9377 * tasks on the runqueues
9378 */
9379 void sched_setnuma(struct task_struct *p, int nid)
9380 {
9381 bool queued, running;
9382 struct rq_flags rf;
9383 struct rq *rq;
9384
9385 rq = task_rq_lock(p, &rf);
9386 queued = task_on_rq_queued(p);
9387 running = task_current(rq, p);
9388
9389 if (queued)
9390 dequeue_task(rq, p, DEQUEUE_SAVE);
9391 if (running)
9392 put_prev_task(rq, p);
9393
9394 p->numa_preferred_nid = nid;
9395
9396 if (queued)
9397 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
9398 if (running)
9399 set_next_task(rq, p);
9400 task_rq_unlock(rq, p, &rf);
9401 }
9402 #endif /* CONFIG_NUMA_BALANCING */
9403
9404 #ifdef CONFIG_HOTPLUG_CPU
9405 /*
9406 * Ensure that the idle task is using init_mm right before its CPU goes
9407 * offline.
9408 */
9409 void idle_task_exit(void)
9410 {
9411 struct mm_struct *mm = current->active_mm;
9412
9413 BUG_ON(cpu_online(smp_processor_id()));
9414 BUG_ON(current != this_rq()->idle);
9415
9416 if (mm != &init_mm) {
9417 switch_mm(mm, &init_mm, current);
9418 finish_arch_post_lock_switch();
9419 }
9420
9421 /* finish_cpu(), as run on the BP, will clean up the active_mm state */
9422 }
9423
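/*
 * CPU-stopper callback used by balance_push(): migrate the given task off
 * this (dying) CPU to a fallback runqueue, then drop the task reference
 * taken in balance_push().
 */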
9424 static int __balance_push_cpu_stop(void *arg)
9425 {
9426 struct task_struct *p = arg;
9427 struct rq *rq = this_rq();
9428 struct rq_flags rf;
9429 int cpu;
9430
9431 raw_spin_lock_irq(&p->pi_lock);
9432 rq_lock(rq, &rf);
9433
9434 update_rq_clock(rq);
9435
9436 if (task_rq(p) == rq && task_on_rq_queued(p)) {
9437 cpu = select_fallback_rq(rq->cpu, p);
9438 rq = __migrate_task(rq, &rf, p, cpu);
9439 }
9440
9441 rq_unlock(rq, &rf);
9442 raw_spin_unlock_irq(&p->pi_lock);
9443
9444 put_task_struct(p);
9445
9446 return 0;
9447 }
9448
9449 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
9450
9451 /*
9452 * Ensure we only run per-cpu kthreads once the CPU goes !active.
9453 *
9454 * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it is
9455 * only effective while the CPU is being taken down.
9456 */
9457 static void balance_push(struct rq *rq)
9458 {
9459 struct task_struct *push_task = rq->curr;
9460
9461 lockdep_assert_rq_held(rq);
9462
9463 /*
9464 * Ensure the thing is persistent until balance_push_set(.on = false);
9465 */
9466 rq->balance_callback = &balance_push_callback;
9467
9468 /*
9469 * Only active while going offline and when invoked on the outgoing
9470 * CPU.
9471 */
9472 if (!cpu_dying(rq->cpu) || rq != this_rq())
9473 return;
9474
9475 /*
9476 * Both the cpu-hotplug and stop task are in this case and are
9477 * required to complete the hotplug process.
9478 */
9479 if (kthread_is_per_cpu(push_task) ||
9480 is_migration_disabled(push_task)) {
9481
9482 /*
9483 * If this is the idle task on the outgoing CPU try to wake
9484 * up the hotplug control thread which might wait for the
9485 * last task to vanish. The rcuwait_active() check is
9486 * accurate here because the waiter is pinned on this CPU
9487 * and can't obviously be running in parallel.
9488 *
9489 * On RT kernels this also has to check whether there are
9490 * pinned and scheduled out tasks on the runqueue. They
9491 * need to leave the migrate disabled section first.
9492 */
9493 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
9494 rcuwait_active(&rq->hotplug_wait)) {
9495 raw_spin_rq_unlock(rq);
9496 rcuwait_wake_up(&rq->hotplug_wait);
9497 raw_spin_rq_lock(rq);
9498 }
9499 return;
9500 }
9501
9502 get_task_struct(push_task);
9503 /*
9504 * Temporarily drop rq->lock such that we can wake-up the stop task.
9505 * Both preemption and IRQs are still disabled.
9506 */
9507 preempt_disable();
9508 raw_spin_rq_unlock(rq);
9509 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
9510 this_cpu_ptr(&push_work));
9511 preempt_enable();
9512 /*
9513 * At this point need_resched() is true and we'll take the loop in
9514 * schedule(). The next pick is obviously going to be the stop task
9515 * which kthread_is_per_cpu() and will push this task away.
9516 */
9517 raw_spin_rq_lock(rq);
9518 }
9519
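/*
 * Install or remove the balance_push callback for @cpu's runqueue,
 * depending on @on. Used around CPU hot-unplug to force tasks off the
 * outgoing CPU.
 */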
9520 static void balance_push_set(int cpu, bool on)
9521 {
9522 struct rq *rq = cpu_rq(cpu);
9523 struct rq_flags rf;
9524
9525 rq_lock_irqsave(rq, &rf);
9526 if (on) {
9527 WARN_ON_ONCE(rq->balance_callback);
9528 rq->balance_callback = &balance_push_callback;
9529 } else if (rq->balance_callback == &balance_push_callback) {
9530 rq->balance_callback = NULL;
9531 }
9532 rq_unlock_irqrestore(rq, &rf);
9533 }
9534
9535 /*
9536 * Invoked from a CPU's hotplug control thread after the CPU has been marked
9537 * inactive. All tasks which are not per CPU kernel threads are either
9538 * pushed off this CPU now via balance_push() or placed on a different CPU
9539 * during wakeup. Wait until the CPU is quiescent.
9540 */
9541 static void balance_hotplug_wait(void)
9542 {
9543 struct rq *rq = this_rq();
9544
9545 rcuwait_wait_event(&rq->hotplug_wait,
9546 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
9547 TASK_UNINTERRUPTIBLE);
9548 }
9549
9550 #else
9551
9552 static inline void balance_push(struct rq *rq)
9553 {
9554 }
9555
9556 static inline void balance_push_set(int cpu, bool on)
9557 {
9558 }
9559
9560 static inline void balance_hotplug_wait(void)
9561 {
9562 }
9563
9564 #endif /* CONFIG_HOTPLUG_CPU */
9565
9566 void set_rq_online(struct rq *rq)
9567 {
9568 if (!rq->online) {
9569 const struct sched_class *class;
9570
9571 cpumask_set_cpu(rq->cpu, rq->rd->online);
9572 rq->online = 1;
9573
9574 for_each_class(class) {
9575 if (class->rq_online)
9576 class->rq_online(rq);
9577 }
9578 }
9579 }
9580
9581 void set_rq_offline(struct rq *rq)
9582 {
9583 if (rq->online) {
9584 const struct sched_class *class;
9585
9586 update_rq_clock(rq);
9587 for_each_class(class) {
9588 if (class->rq_offline)
9589 class->rq_offline(rq);
9590 }
9591
9592 cpumask_clear_cpu(rq->cpu, rq->rd->online);
9593 rq->online = 0;
9594 }
9595 }
9596
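/*
 * These two helpers take the runqueue lock and flip the runqueue's online
 * state in its root domain, if it is attached to one.
 */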
9597 static inline void sched_set_rq_online(struct rq *rq, int cpu)
9598 {
9599 struct rq_flags rf;
9600
9601 rq_lock_irqsave(rq, &rf);
9602 if (rq->rd) {
9603 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9604 set_rq_online(rq);
9605 }
9606 rq_unlock_irqrestore(rq, &rf);
9607 }
9608
9609 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
9610 {
9611 struct rq_flags rf;
9612
9613 rq_lock_irqsave(rq, &rf);
9614 if (rq->rd) {
9615 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9616 set_rq_offline(rq);
9617 }
9618 rq_unlock_irqrestore(rq, &rf);
9619 }
9620
9621 /*
9622 * used to mark begin/end of suspend/resume:
9623 */
9624 static int num_cpus_frozen;
9625
9626 /*
9627 * Update cpusets according to cpu_active mask. If cpusets are
9628 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
9629 * around partition_sched_domains().
9630 *
9631 * If we come here as part of a suspend/resume, don't touch cpusets because we
9632 * want to restore it back to its original state upon resume anyway.
9633 */
9634 static void cpuset_cpu_active(void)
9635 {
9636 if (cpuhp_tasks_frozen) {
9637 /*
9638 * num_cpus_frozen tracks how many CPUs are involved in suspend
9639 * resume sequence. As long as this is not the last online
9640 * operation in the resume sequence, just build a single sched
9641 * domain, ignoring cpusets.
9642 */
9643 partition_sched_domains(1, NULL, NULL);
9644 if (--num_cpus_frozen)
9645 return;
9646 /*
9647 * This is the last CPU online operation. So fall through and
9648 * restore the original sched domains by considering the
9649 * cpuset configurations.
9650 */
9651 cpuset_force_rebuild();
9652 }
9653 cpuset_update_active_cpus();
9654 }
9655
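/*
 * Counterpart of cpuset_cpu_active() for the offline path: outside of
 * suspend/resume, check that DEADLINE bandwidth still fits without this
 * CPU and let cpusets rebuild the sched domains; during suspend just
 * collapse to a single domain and count the frozen CPU.
 */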
9656 static int cpuset_cpu_inactive(unsigned int cpu)
9657 {
9658 if (!cpuhp_tasks_frozen) {
9659 int ret = dl_bw_check_overflow(cpu);
9660
9661 if (ret)
9662 return ret;
9663 cpuset_update_active_cpus();
9664 } else {
9665 num_cpus_frozen++;
9666 partition_sched_domains(1, NULL, NULL);
9667 }
9668 return 0;
9669 }
9670
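/*
 * Maintain the sched_smt_present static key: it is incremented when a core
 * gains its second online SMT sibling and decremented when the core is
 * about to drop back to a single online sibling.
 */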
9671 static inline void sched_smt_present_inc(int cpu)
9672 {
9673 #ifdef CONFIG_SCHED_SMT
9674 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9675 static_branch_inc_cpuslocked(&sched_smt_present);
9676 #endif
9677 }
9678
9679 static inline void sched_smt_present_dec(int cpu)
9680 {
9681 #ifdef CONFIG_SCHED_SMT
9682 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9683 static_branch_dec_cpuslocked(&sched_smt_present);
9684 #endif
9685 }
9686
9687 int sched_cpu_activate(unsigned int cpu)
9688 {
9689 struct rq *rq = cpu_rq(cpu);
9690
9691 /*
9692 * Clear the balance_push callback and prepare to schedule
9693 * regular tasks.
9694 */
9695 balance_push_set(cpu, false);
9696
9697 /*
9698 * When going up, increment the number of cores with SMT present.
9699 */
9700 sched_smt_present_inc(cpu);
9701 set_cpu_active(cpu, true);
9702
9703 if (sched_smp_initialized) {
9704 sched_update_numa(cpu, true);
9705 sched_domains_numa_masks_set(cpu);
9706 cpuset_cpu_active();
9707 }
9708
9709 /*
9710 * Put the rq online, if not already. This happens:
9711 *
9712 * 1) In the early boot process, because we build the real domains
9713 * after all CPUs have been brought up.
9714 *
9715 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
9716 * domains.
9717 */
9718 sched_set_rq_online(rq, cpu);
9719
9720 return 0;
9721 }
9722
9723 int sched_cpu_deactivate(unsigned int cpu)
9724 {
9725 struct rq *rq = cpu_rq(cpu);
9726 int ret;
9727
9728 /*
9729 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
9730 * load balancing when not active
9731 */
9732 nohz_balance_exit_idle(rq);
9733
9734 set_cpu_active(cpu, false);
9735
9736 /*
9737 * From this point forward, this CPU will refuse to run any task that
9738 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
9739 * push those tasks away until this gets cleared, see
9740 * sched_cpu_dying().
9741 */
9742 balance_push_set(cpu, true);
9743
9744 /*
9745 * We've cleared cpu_active_mask / set balance_push, wait for all
9746 * preempt-disabled and RCU users of this state to go away such that
9747 * all new such users will observe it.
9748 *
9749 * Specifically, we rely on ttwu to no longer target this CPU, see
9750 * ttwu_queue_cond() and is_cpu_allowed().
9751 *
9752 * Do sync before parking smpboot threads to take care of the RCU boost case.
9753 */
9754 synchronize_rcu();
9755
9756 sched_set_rq_offline(rq, cpu);
9757
9758 /*
9759 * When going down, decrement the number of cores with SMT present.
9760 */
9761 sched_smt_present_dec(cpu);
9762
9763 #ifdef CONFIG_SCHED_SMT
9764 sched_core_cpu_deactivate(cpu);
9765 #endif
9766
9767 if (!sched_smp_initialized)
9768 return 0;
9769
9770 sched_update_numa(cpu, false);
9771 ret = cpuset_cpu_inactive(cpu);
9772 if (ret) {
9773 sched_smt_present_inc(cpu);
9774 sched_set_rq_online(rq, cpu);
9775 balance_push_set(cpu, false);
9776 set_cpu_active(cpu, true);
9777 sched_update_numa(cpu, true);
9778 return ret;
9779 }
9780 sched_domains_numa_masks_clear(cpu);
9781 return 0;
9782 }
9783
9784 static void sched_rq_cpu_starting(unsigned int cpu)
9785 {
9786 struct rq *rq = cpu_rq(cpu);
9787
9788 rq->calc_load_update = calc_load_update;
9789 update_max_interval();
9790 }
9791
9792 int sched_cpu_starting(unsigned int cpu)
9793 {
9794 sched_core_cpu_starting(cpu);
9795 sched_rq_cpu_starting(cpu);
9796 sched_tick_start(cpu);
9797 return 0;
9798 }
9799
9800 #ifdef CONFIG_HOTPLUG_CPU
9801
9802 /*
9803 * Invoked immediately before the stopper thread is invoked to bring the
9804 * CPU down completely. At this point all per CPU kthreads except the
9805 * hotplug thread (current) and the stopper thread (inactive) have been
9806 * either parked or have been unbound from the outgoing CPU. Ensure that
9807 * any of those which might be on the way out are gone.
9808 *
9809 * If after this point a bound task is being woken on this CPU then the
9810 * responsible hotplug callback has failed to do its job.
9811 * sched_cpu_dying() will catch it with the appropriate fireworks.
9812 */
9813 int sched_cpu_wait_empty(unsigned int cpu)
9814 {
9815 balance_hotplug_wait();
9816 return 0;
9817 }
9818
9819 /*
9820 * Since this CPU is going 'away' for a while, fold any nr_active delta we
9821 * might have. Called from the CPU stopper task after ensuring that the
9822 * stopper is the last running task on the CPU, so nr_active count is
9823 * stable. We need to take the teardown thread which is calling this into
9824 * account, so we hand in adjust = 1 to the load calculation.
9825 *
9826 * Also see the comment "Global load-average calculations".
9827 */
9828 static void calc_load_migrate(struct rq *rq)
9829 {
9830 long delta = calc_load_fold_active(rq, 1);
9831
9832 if (delta)
9833 atomic_long_add(delta, &calc_load_tasks);
9834 }
9835
9836 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
9837 {
9838 struct task_struct *g, *p;
9839 int cpu = cpu_of(rq);
9840
9841 lockdep_assert_rq_held(rq);
9842
9843 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
9844 for_each_process_thread(g, p) {
9845 if (task_cpu(p) != cpu)
9846 continue;
9847
9848 if (!task_on_rq_queued(p))
9849 continue;
9850
9851 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
9852 }
9853 }
9854
9855 int sched_cpu_dying(unsigned int cpu)
9856 {
9857 struct rq *rq = cpu_rq(cpu);
9858 struct rq_flags rf;
9859
9860 /* Handle pending wakeups and then migrate everything off */
9861 sched_tick_stop(cpu);
9862
9863 rq_lock_irqsave(rq, &rf);
9864 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
9865 WARN(true, "Dying CPU not properly vacated!");
9866 dump_rq_tasks(rq, KERN_WARNING);
9867 }
9868 rq_unlock_irqrestore(rq, &rf);
9869
9870 calc_load_migrate(rq);
9871 update_max_interval();
9872 hrtick_clear(rq);
9873 sched_core_cpu_dying(cpu);
9874 return 0;
9875 }
9876 #endif
9877
9878 void __init sched_init_smp(void)
9879 {
9880 sched_init_numa(NUMA_NO_NODE);
9881
9882 /*
9883 * There's no userspace yet to cause hotplug operations; hence all the
9884 * CPU masks are stable and all blatant races in the below code cannot
9885 * happen.
9886 */
9887 mutex_lock(&sched_domains_mutex);
9888 sched_init_domains(cpu_active_mask);
9889 mutex_unlock(&sched_domains_mutex);
9890
9891 /* Move init over to a non-isolated CPU */
9892 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
9893 BUG();
9894 current->flags &= ~PF_NO_SETAFFINITY;
9895 sched_init_granularity();
9896
9897 init_sched_rt_class();
9898 init_sched_dl_class();
9899
9900 sched_smp_initialized = true;
9901 }
9902
9903 static int __init migration_init(void)
9904 {
9905 sched_cpu_starting(smp_processor_id());
9906 return 0;
9907 }
9908 early_initcall(migration_init);
9909
9910 #else
9911 void __init sched_init_smp(void)
9912 {
9913 sched_init_granularity();
9914 }
9915 #endif /* CONFIG_SMP */
9916
9917 int in_sched_functions(unsigned long addr)
9918 {
9919 return in_lock_functions(addr) ||
9920 (addr >= (unsigned long)__sched_text_start
9921 && addr < (unsigned long)__sched_text_end);
9922 }
9923
9924 #ifdef CONFIG_CGROUP_SCHED
9925 /*
9926 * Default task group.
9927 * Every task in system belongs to this group at bootup.
9928 */
9929 struct task_group root_task_group;
9930 LIST_HEAD(task_groups);
9931
9932 /* Cacheline aligned slab cache for task_group */
9933 static struct kmem_cache *task_group_cache __read_mostly;
9934 #endif
9935
9936 void __init sched_init(void)
9937 {
9938 unsigned long ptr = 0;
9939 int i;
9940
9941 /* Make sure the linker didn't screw up */
9942 BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
9943 &fair_sched_class != &rt_sched_class + 1 ||
9944 &rt_sched_class != &dl_sched_class + 1);
9945 #ifdef CONFIG_SMP
9946 BUG_ON(&dl_sched_class != &stop_sched_class + 1);
9947 #endif
9948
9949 wait_bit_init();
9950
9951 #ifdef CONFIG_FAIR_GROUP_SCHED
9952 ptr += 2 * nr_cpu_ids * sizeof(void **);
9953 #endif
9954 #ifdef CONFIG_RT_GROUP_SCHED
9955 ptr += 2 * nr_cpu_ids * sizeof(void **);
9956 #endif
9957 if (ptr) {
9958 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
9959
9960 #ifdef CONFIG_FAIR_GROUP_SCHED
9961 root_task_group.se = (struct sched_entity **)ptr;
9962 ptr += nr_cpu_ids * sizeof(void **);
9963
9964 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
9965 ptr += nr_cpu_ids * sizeof(void **);
9966
9967 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
9968 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
9969 #endif /* CONFIG_FAIR_GROUP_SCHED */
9970 #ifdef CONFIG_RT_GROUP_SCHED
9971 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
9972 ptr += nr_cpu_ids * sizeof(void **);
9973
9974 root_task_group.rt_rq = (struct rt_rq **)ptr;
9975 ptr += nr_cpu_ids * sizeof(void **);
9976
9977 #endif /* CONFIG_RT_GROUP_SCHED */
9978 }
9979
9980 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
9981
9982 #ifdef CONFIG_SMP
9983 init_defrootdomain();
9984 #endif
9985
9986 #ifdef CONFIG_RT_GROUP_SCHED
9987 init_rt_bandwidth(&root_task_group.rt_bandwidth,
9988 global_rt_period(), global_rt_runtime());
9989 #endif /* CONFIG_RT_GROUP_SCHED */
9990
9991 #ifdef CONFIG_CGROUP_SCHED
9992 task_group_cache = KMEM_CACHE(task_group, 0);
9993
9994 list_add(&root_task_group.list, &task_groups);
9995 INIT_LIST_HEAD(&root_task_group.children);
9996 INIT_LIST_HEAD(&root_task_group.siblings);
9997 autogroup_init(&init_task);
9998 #endif /* CONFIG_CGROUP_SCHED */
9999
10000 for_each_possible_cpu(i) {
10001 struct rq *rq;
10002
10003 rq = cpu_rq(i);
10004 raw_spin_lock_init(&rq->__lock);
10005 rq->nr_running = 0;
10006 rq->calc_load_active = 0;
10007 rq->calc_load_update = jiffies + LOAD_FREQ;
10008 init_cfs_rq(&rq->cfs);
10009 init_rt_rq(&rq->rt);
10010 init_dl_rq(&rq->dl);
10011 #ifdef CONFIG_FAIR_GROUP_SCHED
10012 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
10013 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
10014 /*
10015 * How much CPU bandwidth does root_task_group get?
10016 *
10017 * In case of task-groups formed through the cgroup filesystem, it
10018 * gets 100% of the CPU resources in the system. This overall
10019 * system CPU resource is divided among the tasks of
10020 * root_task_group and its child task-groups in a fair manner,
10021 * based on each entity's (task or task-group's) weight
10022 * (se->load.weight).
10023 *
10024 * In other words, if root_task_group has 10 tasks (each of weight
10025 * 1024) and two child groups A0 and A1 (of weight 1024 each),
10026 * then A0's share of the CPU resource is:
10027 *
10028 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
10029 *
10030 * We achieve this by letting root_task_group's tasks sit
10031 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
10032 */
10033 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
10034 #endif /* CONFIG_FAIR_GROUP_SCHED */
10035
10036 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
10037 #ifdef CONFIG_RT_GROUP_SCHED
10038 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
10039 #endif
10040 #ifdef CONFIG_SMP
10041 rq->sd = NULL;
10042 rq->rd = NULL;
10043 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
10044 rq->balance_callback = &balance_push_callback;
10045 rq->active_balance = 0;
10046 rq->next_balance = jiffies;
10047 rq->push_cpu = 0;
10048 rq->cpu = i;
10049 rq->online = 0;
10050 rq->idle_stamp = 0;
10051 rq->avg_idle = 2*sysctl_sched_migration_cost;
10052 rq->wake_stamp = jiffies;
10053 rq->wake_avg_idle = rq->avg_idle;
10054 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
10055
10056 INIT_LIST_HEAD(&rq->cfs_tasks);
10057
10058 rq_attach_root(rq, &def_root_domain);
10059 #ifdef CONFIG_NO_HZ_COMMON
10060 rq->last_blocked_load_update_tick = jiffies;
10061 atomic_set(&rq->nohz_flags, 0);
10062
10063 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
10064 #endif
10065 #ifdef CONFIG_HOTPLUG_CPU
10066 rcuwait_init(&rq->hotplug_wait);
10067 #endif
10068 #endif /* CONFIG_SMP */
10069 hrtick_rq_init(rq);
10070 atomic_set(&rq->nr_iowait, 0);
10071
10072 #ifdef CONFIG_SCHED_CORE
10073 rq->core = rq;
10074 rq->core_pick = NULL;
10075 rq->core_enabled = 0;
10076 rq->core_tree = RB_ROOT;
10077 rq->core_forceidle_count = 0;
10078 rq->core_forceidle_occupation = 0;
10079 rq->core_forceidle_start = 0;
10080
10081 rq->core_cookie = 0UL;
10082 #endif
10083 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
10084 }
10085
10086 set_load_weight(&init_task, false);
10087
10088 /*
10089 * The boot idle thread does lazy MMU switching as well:
10090 */
10091 mmgrab_lazy_tlb(&init_mm);
10092 enter_lazy_tlb(&init_mm, current);
10093
10094 /*
10095 * The idle task doesn't need the kthread struct to function, but it
10096 * is dressed up as a per-CPU kthread and thus needs to play the part
10097 * if we want to avoid special-casing it in code that deals with per-CPU
10098 * kthreads.
10099 */
10100 WARN_ON(!set_kthread_struct(current));
10101
10102 /*
10103 * Make us the idle thread. Technically, schedule() should not be
10104 * called from this thread, however somewhere below it might be,
10105 * but because we are the idle thread, we just pick up running again
10106 * when this runqueue becomes "idle".
10107 */
10108 init_idle(current, smp_processor_id());
10109
10110 calc_load_update = jiffies + LOAD_FREQ;
10111
10112 #ifdef CONFIG_SMP
10113 idle_thread_set_boot_cpu();
10114 balance_push_set(smp_processor_id(), false);
10115 #endif
10116 init_sched_fair_class();
10117
10118 psi_init();
10119
10120 init_uclamp();
10121
10122 preempt_dynamic_init();
10123
10124 scheduler_running = 1;
10125 }
10126
10127 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
10128
10129 void __might_sleep(const char *file, int line)
10130 {
10131 unsigned int state = get_current_state();
10132 /*
10133 * Blocking primitives will set (and therefore destroy) current->state,
10134 * since we will exit with TASK_RUNNING, make sure we enter with it,
10135 * otherwise we will destroy state.
10136 */
10137 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
10138 "do not call blocking ops when !TASK_RUNNING; "
10139 "state=%x set at [<%p>] %pS\n", state,
10140 (void *)current->task_state_change,
10141 (void *)current->task_state_change);
10142
10143 __might_resched(file, line, 0);
10144 }
10145 EXPORT_SYMBOL(__might_sleep);
10146
10147 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
10148 {
10149 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
10150 return;
10151
10152 if (preempt_count() == preempt_offset)
10153 return;
10154
10155 pr_err("Preemption disabled at:");
10156 print_ip_sym(KERN_ERR, ip);
10157 }
10158
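/*
 * Check that the current preempt count plus the RCU read-side nesting
 * depth (encoded in the upper bits) matches exactly the offsets that the
 * caller of __might_resched() declared as expected.
 */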
10159 static inline bool resched_offsets_ok(unsigned int offsets)
10160 {
10161 unsigned int nested = preempt_count();
10162
10163 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
10164
10165 return nested == offsets;
10166 }
10167
10168 void __might_resched(const char *file, int line, unsigned int offsets)
10169 {
10170 /* Ratelimiting timestamp: */
10171 static unsigned long prev_jiffy;
10172
10173 unsigned long preempt_disable_ip;
10174
10175 /* WARN_ON_ONCE() by default, no rate limit required: */
10176 rcu_sleep_check();
10177
10178 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
10179 !is_idle_task(current) && !current->non_block_count) ||
10180 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
10181 oops_in_progress)
10182 return;
10183
10184 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10185 return;
10186 prev_jiffy = jiffies;
10187
10188 /* Save this before calling printk(), since that will clobber it: */
10189 preempt_disable_ip = get_preempt_disable_ip(current);
10190
10191 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
10192 file, line);
10193 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
10194 in_atomic(), irqs_disabled(), current->non_block_count,
10195 current->pid, current->comm);
10196 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
10197 offsets & MIGHT_RESCHED_PREEMPT_MASK);
10198
10199 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
10200 pr_err("RCU nest depth: %d, expected: %u\n",
10201 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
10202 }
10203
10204 if (task_stack_end_corrupted(current))
10205 pr_emerg("Thread overran stack, or stack corrupted\n");
10206
10207 debug_show_held_locks(current);
10208 if (irqs_disabled())
10209 print_irqtrace_events(current);
10210
10211 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
10212 preempt_disable_ip);
10213
10214 dump_stack();
10215 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10216 }
10217 EXPORT_SYMBOL(__might_resched);
10218
10219 void __cant_sleep(const char *file, int line, int preempt_offset)
10220 {
10221 static unsigned long prev_jiffy;
10222
10223 if (irqs_disabled())
10224 return;
10225
10226 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
10227 return;
10228
10229 if (preempt_count() > preempt_offset)
10230 return;
10231
10232 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10233 return;
10234 prev_jiffy = jiffies;
10235
10236 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
10237 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
10238 in_atomic(), irqs_disabled(),
10239 current->pid, current->comm);
10240
10241 debug_show_held_locks(current);
10242 dump_stack();
10243 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10244 }
10245 EXPORT_SYMBOL_GPL(__cant_sleep);
10246
10247 #ifdef CONFIG_SMP
10248 void __cant_migrate(const char *file, int line)
10249 {
10250 static unsigned long prev_jiffy;
10251
10252 if (irqs_disabled())
10253 return;
10254
10255 if (is_migration_disabled(current))
10256 return;
10257
10258 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
10259 return;
10260
10261 if (preempt_count() > 0)
10262 return;
10263
10264 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10265 return;
10266 prev_jiffy = jiffies;
10267
10268 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
10269 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
10270 in_atomic(), irqs_disabled(), is_migration_disabled(current),
10271 current->pid, current->comm);
10272
10273 debug_show_held_locks(current);
10274 dump_stack();
10275 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10276 }
10277 EXPORT_SYMBOL_GPL(__cant_migrate);
10278 #endif
10279 #endif
10280
10281 #ifdef CONFIG_MAGIC_SYSRQ
10282 void normalize_rt_tasks(void)
10283 {
10284 struct task_struct *g, *p;
10285 struct sched_attr attr = {
10286 .sched_policy = SCHED_NORMAL,
10287 };
10288
10289 read_lock(&tasklist_lock);
10290 for_each_process_thread(g, p) {
10291 /*
10292 * Only normalize user tasks:
10293 */
10294 if (p->flags & PF_KTHREAD)
10295 continue;
10296
10297 p->se.exec_start = 0;
10298 schedstat_set(p->stats.wait_start, 0);
10299 schedstat_set(p->stats.sleep_start, 0);
10300 schedstat_set(p->stats.block_start, 0);
10301
10302 if (!dl_task(p) && !rt_task(p)) {
10303 /*
10304 * Renice negative nice level userspace
10305 * tasks back to 0:
10306 */
10307 if (task_nice(p) < 0)
10308 set_user_nice(p, 0);
10309 continue;
10310 }
10311
10312 __sched_setscheduler(p, &attr, false, false);
10313 }
10314 read_unlock(&tasklist_lock);
10315 }
10316
10317 #endif /* CONFIG_MAGIC_SYSRQ */
10318
10319 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
10320 /*
10321 * These functions are only useful for the IA64 MCA handling, or kdb.
10322 *
10323 * They can only be called when the whole system has been
10324 * stopped - every CPU needs to be quiescent, and no scheduling
10325 * activity can take place. Using them for anything else would
10326 * be a serious bug, and as a result, they aren't even visible
10327 * under any other configuration.
10328 */
10329
10330 /**
10331 * curr_task - return the current task for a given CPU.
10332 * @cpu: the processor in question.
10333 *
10334 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
10335 *
10336 * Return: The current task for @cpu.
10337 */
10338 struct task_struct *curr_task(int cpu)
10339 {
10340 return cpu_curr(cpu);
10341 }
10342
10343 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
10344
10345 #ifdef CONFIG_IA64
10346 /**
10347 * ia64_set_curr_task - set the current task for a given CPU.
10348 * @cpu: the processor in question.
10349 * @p: the task pointer to set.
10350 *
10351 * Description: This function must only be used when non-maskable interrupts
10352 * are serviced on a separate stack. It allows the architecture to switch the
10353 * notion of the current task on a CPU in a non-blocking manner. This function
10354 * must be called with all CPUs synchronized and interrupts disabled, and the
10355 * caller must save the original value of the current task (see
10356 * curr_task() above) and restore that value before reenabling interrupts and
10357 * re-starting the system.
10358 *
10359 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
10360 */
10361 void ia64_set_curr_task(int cpu, struct task_struct *p)
10362 {
10363 cpu_curr(cpu) = p;
10364 }
10365
10366 #endif
10367
10368 #ifdef CONFIG_CGROUP_SCHED
10369 /* task_group_lock serializes the addition/removal of task groups */
10370 static DEFINE_SPINLOCK(task_group_lock);
10371
10372 static inline void alloc_uclamp_sched_group(struct task_group *tg,
10373 struct task_group *parent)
10374 {
10375 #ifdef CONFIG_UCLAMP_TASK_GROUP
10376 enum uclamp_id clamp_id;
10377
10378 for_each_clamp_id(clamp_id) {
10379 uclamp_se_set(&tg->uclamp_req[clamp_id],
10380 uclamp_none(clamp_id), false);
10381 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
10382 }
10383 #endif
10384 }
10385
10386 static void sched_free_group(struct task_group *tg)
10387 {
10388 free_fair_sched_group(tg);
10389 free_rt_sched_group(tg);
10390 autogroup_free(tg);
10391 kmem_cache_free(task_group_cache, tg);
10392 }
10393
10394 static void sched_free_group_rcu(struct rcu_head *rcu)
10395 {
10396 sched_free_group(container_of(rcu, struct task_group, rcu));
10397 }
10398
10399 static void sched_unregister_group(struct task_group *tg)
10400 {
10401 unregister_fair_sched_group(tg);
10402 unregister_rt_sched_group(tg);
10403 /*
10404 * We have to wait for yet another RCU grace period to expire, as
10405 * print_cfs_stats() might run concurrently.
10406 */
10407 call_rcu(&tg->rcu, sched_free_group_rcu);
10408 }
10409
10410 /* allocate runqueue etc for a new task group */
10411 struct task_group *sched_create_group(struct task_group *parent)
10412 {
10413 struct task_group *tg;
10414
10415 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
10416 if (!tg)
10417 return ERR_PTR(-ENOMEM);
10418
10419 if (!alloc_fair_sched_group(tg, parent))
10420 goto err;
10421
10422 if (!alloc_rt_sched_group(tg, parent))
10423 goto err;
10424
10425 alloc_uclamp_sched_group(tg, parent);
10426
10427 return tg;
10428
10429 err:
10430 sched_free_group(tg);
10431 return ERR_PTR(-ENOMEM);
10432 }
10433
10434 void sched_online_group(struct task_group *tg, struct task_group *parent)
10435 {
10436 unsigned long flags;
10437
10438 spin_lock_irqsave(&task_group_lock, flags);
10439 list_add_rcu(&tg->list, &task_groups);
10440
10441 /* Root should already exist: */
10442 WARN_ON(!parent);
10443
10444 tg->parent = parent;
10445 INIT_LIST_HEAD(&tg->children);
10446 list_add_rcu(&tg->siblings, &parent->children);
10447 spin_unlock_irqrestore(&task_group_lock, flags);
10448
10449 online_fair_sched_group(tg);
10450 }
10451
10452 /* rcu callback to free various structures associated with a task group */
10453 static void sched_unregister_group_rcu(struct rcu_head *rhp)
10454 {
10455 /* Now it should be safe to free those cfs_rqs: */
10456 sched_unregister_group(container_of(rhp, struct task_group, rcu));
10457 }
10458
10459 void sched_destroy_group(struct task_group *tg)
10460 {
10461 /* Wait for possible concurrent references to cfs_rqs to complete: */
10462 call_rcu(&tg->rcu, sched_unregister_group_rcu);
10463 }
10464
10465 void sched_release_group(struct task_group *tg)
10466 {
10467 unsigned long flags;
10468
10469 /*
10470 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
10471 * sched_cfs_period_timer()).
10472 *
10473 * For this to be effective, we have to wait for all pending users of
10474 * this task group to leave their RCU critical section to ensure no new
10475 * user will see our dying task group any more. Specifically ensure
10476 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
10477 *
10478 * We therefore defer calling unregister_fair_sched_group() to
10479 * sched_unregister_group() which is guaranteed to get called only after the
10480 * current RCU grace period has expired.
10481 */
10482 spin_lock_irqsave(&task_group_lock, flags);
10483 list_del_rcu(&tg->list);
10484 list_del_rcu(&tg->siblings);
10485 spin_unlock_irqrestore(&task_group_lock, flags);
10486 }
10487
10488 static struct task_group *sched_get_task_group(struct task_struct *tsk)
10489 {
10490 struct task_group *tg;
10491
10492 /*
10493 * All callers are synchronized by task_rq_lock(); we do not use RCU,
10494 * which would be pointless here. Thus, we pass "true" to task_css_check()
10495 * to prevent lockdep warnings.
10496 */
10497 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
10498 struct task_group, css);
10499 tg = autogroup_task_group(tsk, tg);
10500
10501 return tg;
10502 }
10503
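/*
 * Point the task at its new task group, letting the scheduling class
 * update any per-class group state via its task_change_group() hook (CFS)
 * and falling back to set_task_rq() otherwise.
 */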
10504 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
10505 {
10506 tsk->sched_task_group = group;
10507
10508 #ifdef CONFIG_FAIR_GROUP_SCHED
10509 if (tsk->sched_class->task_change_group)
10510 tsk->sched_class->task_change_group(tsk);
10511 else
10512 #endif
10513 set_task_rq(tsk, task_cpu(tsk));
10514 }
10515
10516 /*
10517 * Change task's runqueue when it moves between groups.
10518 *
10519 * The caller of this function should have put the task in its new group by
10520 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10521 * its new group.
10522 */
10523 void sched_move_task(struct task_struct *tsk)
10524 {
10525 int queued, running, queue_flags =
10526 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
10527 struct task_group *group;
10528 struct rq_flags rf;
10529 struct rq *rq;
10530
10531 rq = task_rq_lock(tsk, &rf);
10532 /*
10533 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
10534 * group changes.
10535 */
10536 group = sched_get_task_group(tsk);
10537 if (group == tsk->sched_task_group)
10538 goto unlock;
10539
10540 update_rq_clock(rq);
10541
10542 running = task_current(rq, tsk);
10543 queued = task_on_rq_queued(tsk);
10544
10545 if (queued)
10546 dequeue_task(rq, tsk, queue_flags);
10547 if (running)
10548 put_prev_task(rq, tsk);
10549
10550 sched_change_group(tsk, group);
10551
10552 if (queued)
10553 enqueue_task(rq, tsk, queue_flags);
10554 if (running) {
10555 set_next_task(rq, tsk);
10556 /*
10557 * After changing group, the running task may have joined a
10558 * throttled one but it's still the running task. Trigger a
10559 * resched to make sure that task can still run.
10560 */
10561 resched_curr(rq);
10562 }
10563
10564 unlock:
10565 task_rq_unlock(rq, tsk, &rf);
10566 }
10567
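/* Map a cpu-controller css back to its containing task_group. */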
10568 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
10569 {
10570 return css ? container_of(css, struct task_group, css) : NULL;
10571 }
10572
10573 static struct cgroup_subsys_state *
10574 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
10575 {
10576 struct task_group *parent = css_tg(parent_css);
10577 struct task_group *tg;
10578
10579 if (!parent) {
10580 /* This is early initialization for the top cgroup */
10581 return &root_task_group.css;
10582 }
10583
10584 tg = sched_create_group(parent);
10585 if (IS_ERR(tg))
10586 return ERR_PTR(-ENOMEM);
10587
10588 return &tg->css;
10589 }
10590
10591 /* Expose task group only after completing cgroup initialization */
10592 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
10593 {
10594 struct task_group *tg = css_tg(css);
10595 struct task_group *parent = css_tg(css->parent);
10596
10597 if (parent)
10598 sched_online_group(tg, parent);
10599
10600 #ifdef CONFIG_UCLAMP_TASK_GROUP
10601 /* Propagate the effective uclamp value for the new group */
10602 mutex_lock(&uclamp_mutex);
10603 rcu_read_lock();
10604 cpu_util_update_eff(css);
10605 rcu_read_unlock();
10606 mutex_unlock(&uclamp_mutex);
10607 #endif
10608
10609 return 0;
10610 }
10611
10612 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
10613 {
10614 struct task_group *tg = css_tg(css);
10615
10616 sched_release_group(tg);
10617 }
10618
10619 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
10620 {
10621 struct task_group *tg = css_tg(css);
10622
10623 /*
10624 * Relies on the RCU grace period between css_released() and this.
10625 */
10626 sched_unregister_group(tg);
10627 }
10628
10629 #ifdef CONFIG_RT_GROUP_SCHED
10630 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
10631 {
10632 struct task_struct *task;
10633 struct cgroup_subsys_state *css;
10634
10635 cgroup_taskset_for_each(task, css, tset) {
10636 if (!sched_rt_can_attach(css_tg(css), task))
10637 return -EINVAL;
10638 }
10639 return 0;
10640 }
10641 #endif
10642
10643 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
10644 {
10645 struct task_struct *task;
10646 struct cgroup_subsys_state *css;
10647
10648 cgroup_taskset_for_each(task, css, tset)
10649 sched_move_task(task);
10650 }
10651
10652 #ifdef CONFIG_UCLAMP_TASK_GROUP
10653 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
10654 {
10655 struct cgroup_subsys_state *top_css = css;
10656 struct uclamp_se *uc_parent = NULL;
10657 struct uclamp_se *uc_se = NULL;
10658 unsigned int eff[UCLAMP_CNT];
10659 enum uclamp_id clamp_id;
10660 unsigned int clamps;
10661
10662 lockdep_assert_held(&uclamp_mutex);
10663 SCHED_WARN_ON(!rcu_read_lock_held());
10664
10665 css_for_each_descendant_pre(css, top_css) {
10666 uc_parent = css_tg(css)->parent
10667 ? css_tg(css)->parent->uclamp : NULL;
10668
10669 for_each_clamp_id(clamp_id) {
10670 /* Assume effective clamps matches requested clamps */
10671 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
10672 /* Cap effective clamps with parent's effective clamps */
10673 if (uc_parent &&
10674 eff[clamp_id] > uc_parent[clamp_id].value) {
10675 eff[clamp_id] = uc_parent[clamp_id].value;
10676 }
10677 }
10678 /* Ensure protection is always capped by limit */
10679 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
10680
10681 /* Propagate most restrictive effective clamps */
10682 clamps = 0x0;
10683 uc_se = css_tg(css)->uclamp;
10684 for_each_clamp_id(clamp_id) {
10685 if (eff[clamp_id] == uc_se[clamp_id].value)
10686 continue;
10687 uc_se[clamp_id].value = eff[clamp_id];
10688 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
10689 clamps |= (0x1 << clamp_id);
10690 }
10691 if (!clamps) {
10692 css = css_rightmost_descendant(css);
10693 continue;
10694 }
10695
10696 /* Immediately update descendants RUNNABLE tasks */
10697 uclamp_update_active_tasks(css);
10698 }
10699 }
10700
10701 /*
10702 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
10703 * C expression. Since there is no way to convert a macro argument (N) into a
10704 * character constant, use two levels of macros.
10705 */
10706 #define _POW10(exp) ((unsigned int)1e##exp)
10707 #define POW10(exp) _POW10(exp)
10708
10709 struct uclamp_request {
10710 #define UCLAMP_PERCENT_SHIFT 2
10711 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
10712 s64 percent;
10713 u64 util;
10714 int ret;
10715 };
10716
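/*
 * Parse a cgroup uclamp value: either the literal "max" or a percentage
 * with up to two decimal places, converted to a utilization value on the
 * SCHED_CAPACITY_SCALE range.
 */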
10717 static inline struct uclamp_request
10718 capacity_from_percent(char *buf)
10719 {
10720 struct uclamp_request req = {
10721 .percent = UCLAMP_PERCENT_SCALE,
10722 .util = SCHED_CAPACITY_SCALE,
10723 .ret = 0,
10724 };
10725
10726 buf = strim(buf);
10727 if (strcmp(buf, "max")) {
10728 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
10729 &req.percent);
10730 if (req.ret)
10731 return req;
10732 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
10733 req.ret = -ERANGE;
10734 return req;
10735 }
10736
10737 req.util = req.percent << SCHED_CAPACITY_SHIFT;
10738 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
10739 }
10740
10741 return req;
10742 }
10743
10744 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
10745 size_t nbytes, loff_t off,
10746 enum uclamp_id clamp_id)
10747 {
10748 struct uclamp_request req;
10749 struct task_group *tg;
10750
10751 req = capacity_from_percent(buf);
10752 if (req.ret)
10753 return req.ret;
10754
10755 static_branch_enable(&sched_uclamp_used);
10756
10757 mutex_lock(&uclamp_mutex);
10758 rcu_read_lock();
10759
10760 tg = css_tg(of_css(of));
10761 if (tg->uclamp_req[clamp_id].value != req.util)
10762 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
10763
10764 /*
10765 * Because the conversion rounding is not recoverable, we keep track of
10766 * the exact requested value.
10767 */
10768 tg->uclamp_pct[clamp_id] = req.percent;
10769
10770 /* Update effective clamps to track the most restrictive value */
10771 cpu_util_update_eff(of_css(of));
10772
10773 rcu_read_unlock();
10774 mutex_unlock(&uclamp_mutex);
10775
10776 return nbytes;
10777 }
10778
10779 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
10780 char *buf, size_t nbytes,
10781 loff_t off)
10782 {
10783 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
10784 }
10785
10786 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
10787 char *buf, size_t nbytes,
10788 loff_t off)
10789 {
10790 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
10791 }
10792
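/*
 * Print a group's requested clamp for the cgroup files: "max" when
 * unclamped, otherwise the originally requested percentage with two
 * decimal places.
 */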
10793 static inline void cpu_uclamp_print(struct seq_file *sf,
10794 enum uclamp_id clamp_id)
10795 {
10796 struct task_group *tg;
10797 u64 util_clamp;
10798 u64 percent;
10799 u32 rem;
10800
10801 rcu_read_lock();
10802 tg = css_tg(seq_css(sf));
10803 util_clamp = tg->uclamp_req[clamp_id].value;
10804 rcu_read_unlock();
10805
10806 if (util_clamp == SCHED_CAPACITY_SCALE) {
10807 seq_puts(sf, "max\n");
10808 return;
10809 }
10810
10811 percent = tg->uclamp_pct[clamp_id];
10812 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
10813 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
10814 }
10815
10816 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
10817 {
10818 cpu_uclamp_print(sf, UCLAMP_MIN);
10819 return 0;
10820 }
10821
10822 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
10823 {
10824 cpu_uclamp_print(sf, UCLAMP_MAX);
10825 return 0;
10826 }
10827 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10828
10829 #ifdef CONFIG_FAIR_GROUP_SCHED
10830 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
10831 struct cftype *cftype, u64 shareval)
10832 {
10833 if (shareval > scale_load_down(ULONG_MAX))
10834 shareval = MAX_SHARES;
10835 return sched_group_set_shares(css_tg(css), scale_load(shareval));
10836 }
10837
10838 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
10839 struct cftype *cft)
10840 {
10841 struct task_group *tg = css_tg(css);
10842
10843 return (u64) scale_load_down(tg->shares);
10844 }
10845
10846 #ifdef CONFIG_CFS_BANDWIDTH
10847 static DEFINE_MUTEX(cfs_constraints_mutex);
10848
10849 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
10850 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
10851 /* More than 203 days if BW_SHIFT equals 20. */
10852 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
10853
10854 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
10855
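/*
 * Validate and apply a new period/quota/burst triple for @tg, then
 * propagate it: refresh the global runtime pool, restart the period timer
 * if needed and update runtime accounting on every online CPU's cfs_rq.
 */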
10856 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
10857 u64 burst)
10858 {
10859 int i, ret = 0, runtime_enabled, runtime_was_enabled;
10860 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10861
10862 if (tg == &root_task_group)
10863 return -EINVAL;
10864
10865 /*
10866 * Ensure we have some amount of bandwidth every period. This is
10867 * to prevent reaching a state of large arrears when throttled via
10868 * entity_tick() resulting in prolonged exit starvation.
10869 */
10870 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
10871 return -EINVAL;
10872
10873 /*
10874 * Likewise, bound things on the other side by preventing insane quota
10875 * periods. This also allows us to normalize in computing quota
10876 * feasibility.
10877 */
10878 if (period > max_cfs_quota_period)
10879 return -EINVAL;
10880
10881 /*
10882 * Bound quota to defend quota against overflow during bandwidth shift.
10883 */
10884 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
10885 return -EINVAL;
10886
10887 if (quota != RUNTIME_INF && (burst > quota ||
10888 burst + quota > max_cfs_runtime))
10889 return -EINVAL;
10890
10891 /*
10892 * Prevent race between setting of cfs_rq->runtime_enabled and
10893 * unthrottle_offline_cfs_rqs().
10894 */
10895 guard(cpus_read_lock)();
10896 guard(mutex)(&cfs_constraints_mutex);
10897
10898 ret = __cfs_schedulable(tg, period, quota);
10899 if (ret)
10900 return ret;
10901
10902 runtime_enabled = quota != RUNTIME_INF;
10903 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
10904 /*
10905 * If we need to toggle cfs_bandwidth_used, off->on must occur
10906 * before making related changes, and on->off must occur afterwards
10907 */
10908 if (runtime_enabled && !runtime_was_enabled)
10909 cfs_bandwidth_usage_inc();
10910
10911 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
10912 cfs_b->period = ns_to_ktime(period);
10913 cfs_b->quota = quota;
10914 cfs_b->burst = burst;
10915
10916 __refill_cfs_bandwidth_runtime(cfs_b);
10917
10918 /*
10919 * Restart the period timer (if active) to handle new
10920 * period expiry:
10921 */
10922 if (runtime_enabled)
10923 start_cfs_bandwidth(cfs_b);
10924 }
10925
10926 for_each_online_cpu(i) {
10927 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
10928 struct rq *rq = cfs_rq->rq;
10929
10930 guard(rq_lock_irq)(rq);
10931 cfs_rq->runtime_enabled = runtime_enabled;
10932 cfs_rq->runtime_remaining = 0;
10933
10934 if (cfs_rq->throttled)
10935 unthrottle_cfs_rq(cfs_rq);
10936 }
10937
10938 if (runtime_was_enabled && !runtime_enabled)
10939 cfs_bandwidth_usage_dec();
10940
10941 return 0;
10942 }
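
/*
 * Worked example of the validation above (illustrative note, not part of the
 * original source): with period = 100ms and quota = 50ms (burst = 0), every
 * check passes and the group is capped at roughly 50% of one CPU per period.
 * A period of 2s would exceed max_cfs_quota_period, and a quota of 0.5ms
 * would fall below min_cfs_quota_period; both are rejected with -EINVAL.
 */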
10943
10944 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
10945 {
10946 u64 quota, period, burst;
10947
10948 period = ktime_to_ns(tg->cfs_bandwidth.period);
10949 burst = tg->cfs_bandwidth.burst;
10950 if (cfs_quota_us < 0)
10951 quota = RUNTIME_INF;
10952 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
10953 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
10954 else
10955 return -EINVAL;
10956
10957 return tg_set_cfs_bandwidth(tg, period, quota, burst);
10958 }
10959
10960 static long tg_get_cfs_quota(struct task_group *tg)
10961 {
10962 u64 quota_us;
10963
10964 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
10965 return -1;
10966
10967 quota_us = tg->cfs_bandwidth.quota;
10968 do_div(quota_us, NSEC_PER_USEC);
10969
10970 return quota_us;
10971 }
10972
10973 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
10974 {
10975 u64 quota, period, burst;
10976
10977 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
10978 return -EINVAL;
10979
10980 period = (u64)cfs_period_us * NSEC_PER_USEC;
10981 quota = tg->cfs_bandwidth.quota;
10982 burst = tg->cfs_bandwidth.burst;
10983
10984 return tg_set_cfs_bandwidth(tg, period, quota, burst);
10985 }
10986
10987 static long tg_get_cfs_period(struct task_group *tg)
10988 {
10989 u64 cfs_period_us;
10990
10991 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
10992 do_div(cfs_period_us, NSEC_PER_USEC);
10993
10994 return cfs_period_us;
10995 }
10996
10997 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
10998 {
10999 u64 quota, period, burst;
11000
11001 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
11002 return -EINVAL;
11003
11004 burst = (u64)cfs_burst_us * NSEC_PER_USEC;
11005 period = ktime_to_ns(tg->cfs_bandwidth.period);
11006 quota = tg->cfs_bandwidth.quota;
11007
11008 return tg_set_cfs_bandwidth(tg, period, quota, burst);
11009 }
11010
11011 static long tg_get_cfs_burst(struct task_group *tg)
11012 {
11013 u64 burst_us;
11014
11015 burst_us = tg->cfs_bandwidth.burst;
11016 do_div(burst_us, NSEC_PER_USEC);
11017
11018 return burst_us;
11019 }
11020
11021 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
11022 struct cftype *cft)
11023 {
11024 return tg_get_cfs_quota(css_tg(css));
11025 }
11026
11027 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
11028 struct cftype *cftype, s64 cfs_quota_us)
11029 {
11030 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
11031 }
11032
11033 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
11034 struct cftype *cft)
11035 {
11036 return tg_get_cfs_period(css_tg(css));
11037 }
11038
11039 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
11040 struct cftype *cftype, u64 cfs_period_us)
11041 {
11042 return tg_set_cfs_period(css_tg(css), cfs_period_us);
11043 }
11044
11045 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
11046 struct cftype *cft)
11047 {
11048 return tg_get_cfs_burst(css_tg(css));
11049 }
11050
11051 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
11052 struct cftype *cftype, u64 cfs_burst_us)
11053 {
11054 return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
11055 }
11056
11057 struct cfs_schedulable_data {
11058 struct task_group *tg;
11059 u64 period, quota;
11060 };
11061
11062 /*
11063 * normalize group quota/period to be quota/max_period
11064 * note: units are usecs
11065 */
11066 static u64 normalize_cfs_quota(struct task_group *tg,
11067 struct cfs_schedulable_data *d)
11068 {
11069 u64 quota, period;
11070
11071 if (tg == d->tg) {
11072 period = d->period;
11073 quota = d->quota;
11074 } else {
11075 period = tg_get_cfs_period(tg);
11076 quota = tg_get_cfs_quota(tg);
11077 }
11078
11079 /* note: these should typically be equivalent */
11080 if (quota == RUNTIME_INF || quota == -1)
11081 return RUNTIME_INF;
11082
11083 return to_ratio(period, quota);
11084 }
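
/*
 * Illustrative numbers for the normalization above (assuming to_ratio()
 * returns the runtime scaled by 2^BW_SHIFT relative to the period, as used
 * elsewhere in the scheduler): period = 100000us and quota = 50000us
 * normalize to about 0.5 * 2^20 = 524288, i.e. half a CPU in fixed-point
 * form, independent of the period actually configured on the group.
 */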
11085
11086 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
11087 {
11088 struct cfs_schedulable_data *d = data;
11089 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11090 s64 quota = 0, parent_quota = -1;
11091
11092 if (!tg->parent) {
11093 quota = RUNTIME_INF;
11094 } else {
11095 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
11096
11097 quota = normalize_cfs_quota(tg, d);
11098 parent_quota = parent_b->hierarchical_quota;
11099
11100 /*
11101 * Ensure max(child_quota) <= parent_quota. On cgroup2,
11102 * always take the non-RUNTIME_INF min. On cgroup1, only
11103 * inherit when no limit is set. In both cases this is used
11104 * by the scheduler to determine if a given CFS task has a
11105 * bandwidth constraint at some higher level.
11106 */
11107 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
11108 if (quota == RUNTIME_INF)
11109 quota = parent_quota;
11110 else if (parent_quota != RUNTIME_INF)
11111 quota = min(quota, parent_quota);
11112 } else {
11113 if (quota == RUNTIME_INF)
11114 quota = parent_quota;
11115 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
11116 return -EINVAL;
11117 }
11118 }
11119 cfs_b->hierarchical_quota = quota;
11120
11121 return 0;
11122 }
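
/*
 * Example of the hierarchy walk above (illustration only): if a parent is
 * limited to 0.5 CPU and a child asks for 0.8 CPU, cgroup2 silently clamps
 * the child's hierarchical_quota to the parent's 0.5, whereas cgroup1
 * rejects the configuration with -EINVAL because a child must not exceed an
 * explicitly limited parent.
 */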
11123
11124 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
11125 {
11126 int ret;
11127 struct cfs_schedulable_data data = {
11128 .tg = tg,
11129 .period = period,
11130 .quota = quota,
11131 };
11132
11133 if (quota != RUNTIME_INF) {
11134 do_div(data.period, NSEC_PER_USEC);
11135 do_div(data.quota, NSEC_PER_USEC);
11136 }
11137
11138 rcu_read_lock();
11139 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
11140 rcu_read_unlock();
11141
11142 return ret;
11143 }
11144
11145 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
11146 {
11147 struct task_group *tg = css_tg(seq_css(sf));
11148 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11149
11150 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
11151 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
11152 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
11153
11154 if (schedstat_enabled() && tg != &root_task_group) {
11155 struct sched_statistics *stats;
11156 u64 ws = 0;
11157 int i;
11158
11159 for_each_possible_cpu(i) {
11160 stats = __schedstats_from_se(tg->se[i]);
11161 ws += schedstat_val(stats->wait_sum);
11162 }
11163
11164 seq_printf(sf, "wait_sum %llu\n", ws);
11165 }
11166
11167 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
11168 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
11169
11170 return 0;
11171 }
11172
11173 static u64 throttled_time_self(struct task_group *tg)
11174 {
11175 int i;
11176 u64 total = 0;
11177
11178 for_each_possible_cpu(i) {
11179 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
11180 }
11181
11182 return total;
11183 }
11184
11185 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
11186 {
11187 struct task_group *tg = css_tg(seq_css(sf));
11188
11189 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
11190
11191 return 0;
11192 }
11193 #endif /* CONFIG_CFS_BANDWIDTH */
11194 #endif /* CONFIG_FAIR_GROUP_SCHED */
11195
11196 #ifdef CONFIG_RT_GROUP_SCHED
11197 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
11198 struct cftype *cft, s64 val)
11199 {
11200 return sched_group_set_rt_runtime(css_tg(css), val);
11201 }
11202
11203 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
11204 struct cftype *cft)
11205 {
11206 return sched_group_rt_runtime(css_tg(css));
11207 }
11208
11209 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
11210 struct cftype *cftype, u64 rt_period_us)
11211 {
11212 return sched_group_set_rt_period(css_tg(css), rt_period_us);
11213 }
11214
11215 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
11216 struct cftype *cft)
11217 {
11218 return sched_group_rt_period(css_tg(css));
11219 }
11220 #endif /* CONFIG_RT_GROUP_SCHED */
11221
11222 #ifdef CONFIG_FAIR_GROUP_SCHED
11223 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
11224 struct cftype *cft)
11225 {
11226 return css_tg(css)->idle;
11227 }
11228
11229 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
11230 struct cftype *cft, s64 idle)
11231 {
11232 return sched_group_set_idle(css_tg(css), idle);
11233 }
11234 #endif
11235
11236 static struct cftype cpu_legacy_files[] = {
11237 #ifdef CONFIG_FAIR_GROUP_SCHED
11238 {
11239 .name = "shares",
11240 .read_u64 = cpu_shares_read_u64,
11241 .write_u64 = cpu_shares_write_u64,
11242 },
11243 {
11244 .name = "idle",
11245 .read_s64 = cpu_idle_read_s64,
11246 .write_s64 = cpu_idle_write_s64,
11247 },
11248 #endif
11249 #ifdef CONFIG_CFS_BANDWIDTH
11250 {
11251 .name = "cfs_quota_us",
11252 .read_s64 = cpu_cfs_quota_read_s64,
11253 .write_s64 = cpu_cfs_quota_write_s64,
11254 },
11255 {
11256 .name = "cfs_period_us",
11257 .read_u64 = cpu_cfs_period_read_u64,
11258 .write_u64 = cpu_cfs_period_write_u64,
11259 },
11260 {
11261 .name = "cfs_burst_us",
11262 .read_u64 = cpu_cfs_burst_read_u64,
11263 .write_u64 = cpu_cfs_burst_write_u64,
11264 },
11265 {
11266 .name = "stat",
11267 .seq_show = cpu_cfs_stat_show,
11268 },
11269 {
11270 .name = "stat.local",
11271 .seq_show = cpu_cfs_local_stat_show,
11272 },
11273 #endif
11274 #ifdef CONFIG_RT_GROUP_SCHED
11275 {
11276 .name = "rt_runtime_us",
11277 .read_s64 = cpu_rt_runtime_read,
11278 .write_s64 = cpu_rt_runtime_write,
11279 },
11280 {
11281 .name = "rt_period_us",
11282 .read_u64 = cpu_rt_period_read_uint,
11283 .write_u64 = cpu_rt_period_write_uint,
11284 },
11285 #endif
11286 #ifdef CONFIG_UCLAMP_TASK_GROUP
11287 {
11288 .name = "uclamp.min",
11289 .flags = CFTYPE_NOT_ON_ROOT,
11290 .seq_show = cpu_uclamp_min_show,
11291 .write = cpu_uclamp_min_write,
11292 },
11293 {
11294 .name = "uclamp.max",
11295 .flags = CFTYPE_NOT_ON_ROOT,
11296 .seq_show = cpu_uclamp_max_show,
11297 .write = cpu_uclamp_max_write,
11298 },
11299 #endif
11300 { } /* Terminate */
11301 };
11302
11303 static int cpu_extra_stat_show(struct seq_file *sf,
11304 struct cgroup_subsys_state *css)
11305 {
11306 #ifdef CONFIG_CFS_BANDWIDTH
11307 {
11308 struct task_group *tg = css_tg(css);
11309 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11310 u64 throttled_usec, burst_usec;
11311
11312 throttled_usec = cfs_b->throttled_time;
11313 do_div(throttled_usec, NSEC_PER_USEC);
11314 burst_usec = cfs_b->burst_time;
11315 do_div(burst_usec, NSEC_PER_USEC);
11316
11317 seq_printf(sf, "nr_periods %d\n"
11318 "nr_throttled %d\n"
11319 "throttled_usec %llu\n"
11320 "nr_bursts %d\n"
11321 "burst_usec %llu\n",
11322 cfs_b->nr_periods, cfs_b->nr_throttled,
11323 throttled_usec, cfs_b->nr_burst, burst_usec);
11324 }
11325 #endif
11326 return 0;
11327 }
11328
11329 static int cpu_local_stat_show(struct seq_file *sf,
11330 struct cgroup_subsys_state *css)
11331 {
11332 #ifdef CONFIG_CFS_BANDWIDTH
11333 {
11334 struct task_group *tg = css_tg(css);
11335 u64 throttled_self_usec;
11336
11337 throttled_self_usec = throttled_time_self(tg);
11338 do_div(throttled_self_usec, NSEC_PER_USEC);
11339
11340 seq_printf(sf, "throttled_usec %llu\n",
11341 throttled_self_usec);
11342 }
11343 #endif
11344 return 0;
11345 }
11346
11347 #ifdef CONFIG_FAIR_GROUP_SCHED
11348 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
11349 struct cftype *cft)
11350 {
11351 struct task_group *tg = css_tg(css);
11352 u64 weight = scale_load_down(tg->shares);
11353
11354 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
11355 }
11356
11357 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
11358 struct cftype *cft, u64 weight)
11359 {
11360 /*
11361 * cgroup weight knobs should use the common MIN, DFL and MAX
11362 * values which are 1, 100 and 10000 respectively. While it loses
11363 * a bit of range on both ends, it maps pretty well onto the shares
11364 * value used by the scheduler, and the round-trip conversions preserve
11365 * the original value over the entire range.
11366 */
11367 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
11368 return -ERANGE;
11369
11370 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
11371
11372 return sched_group_set_shares(css_tg(css), scale_load(weight));
11373 }
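
/*
 * Round-trip sketch of the weight <-> shares mapping above (not part of the
 * original file): the default weight CGROUP_WEIGHT_DFL (100) maps to
 * DIV_ROUND_CLOSEST_ULL(100 * 1024, 100) = 1024 shares, and reading it back
 * via cpu_weight_read_u64() gives DIV_ROUND_CLOSEST_ULL(1024 * 100, 1024) =
 * 100 again.  Weight 1 maps to 10 shares and weight 10000 to 102400.
 */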
11374
11375 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
11376 struct cftype *cft)
11377 {
11378 unsigned long weight = scale_load_down(css_tg(css)->shares);
11379 int last_delta = INT_MAX;
11380 int prio, delta;
11381
11382 /* find the closest nice value to the current weight */
11383 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
11384 delta = abs(sched_prio_to_weight[prio] - weight);
11385 if (delta >= last_delta)
11386 break;
11387 last_delta = delta;
11388 }
11389
11390 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
11391 }
11392
11393 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
11394 struct cftype *cft, s64 nice)
11395 {
11396 unsigned long weight;
11397 int idx;
11398
11399 if (nice < MIN_NICE || nice > MAX_NICE)
11400 return -ERANGE;
11401
11402 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
11403 idx = array_index_nospec(idx, 40);
11404 weight = sched_prio_to_weight[idx];
11405
11406 return sched_group_set_shares(css_tg(css), scale_load(weight));
11407 }
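
/*
 * Example of the nice <-> weight mapping (illustration only): nice 0 indexes
 * entry 20 of sched_prio_to_weight[] and yields weight 1024, nice -20 yields
 * 88761 and nice 19 yields 15.  The read side above walks the same table to
 * report the nice value whose weight is closest to the group's current
 * shares.
 */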
11408 #endif
11409
11410 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
11411 long period, long quota)
11412 {
11413 if (quota < 0)
11414 seq_puts(sf, "max");
11415 else
11416 seq_printf(sf, "%ld", quota);
11417
11418 seq_printf(sf, " %ld\n", period);
11419 }
11420
11421 /* caller should put the current value in *@periodp before calling */
11422 static int __maybe_unused cpu_period_quota_parse(char *buf,
11423 u64 *periodp, u64 *quotap)
11424 {
11425 char tok[21]; /* U64_MAX */
11426
11427 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
11428 return -EINVAL;
11429
11430 *periodp *= NSEC_PER_USEC;
11431
11432 if (sscanf(tok, "%llu", quotap))
11433 *quotap *= NSEC_PER_USEC;
11434 else if (!strcmp(tok, "max"))
11435 *quotap = RUNTIME_INF;
11436 else
11437 return -EINVAL;
11438
11439 return 0;
11440 }
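
/*
 * Input format sketch for the parser above (illustrative): cpu.max accepts
 * either "max 100000" (no quota limit, 100ms period) or "50000 100000"
 * (50ms of runtime per 100ms period); a lone "max" or "50000" keeps the
 * period previously stored in *periodp by the caller.
 */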
11441
11442 #ifdef CONFIG_CFS_BANDWIDTH
11443 static int cpu_max_show(struct seq_file *sf, void *v)
11444 {
11445 struct task_group *tg = css_tg(seq_css(sf));
11446
11447 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
11448 return 0;
11449 }
11450
11451 static ssize_t cpu_max_write(struct kernfs_open_file *of,
11452 char *buf, size_t nbytes, loff_t off)
11453 {
11454 struct task_group *tg = css_tg(of_css(of));
11455 u64 period = tg_get_cfs_period(tg);
11456 u64 burst = tg->cfs_bandwidth.burst;
11457 u64 quota;
11458 int ret;
11459
11460 ret = cpu_period_quota_parse(buf, &period, &quota);
11461 if (!ret)
11462 ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
11463 return ret ?: nbytes;
11464 }
11465 #endif
11466
11467 static struct cftype cpu_files[] = {
11468 #ifdef CONFIG_FAIR_GROUP_SCHED
11469 {
11470 .name = "weight",
11471 .flags = CFTYPE_NOT_ON_ROOT,
11472 .read_u64 = cpu_weight_read_u64,
11473 .write_u64 = cpu_weight_write_u64,
11474 },
11475 {
11476 .name = "weight.nice",
11477 .flags = CFTYPE_NOT_ON_ROOT,
11478 .read_s64 = cpu_weight_nice_read_s64,
11479 .write_s64 = cpu_weight_nice_write_s64,
11480 },
11481 {
11482 .name = "idle",
11483 .flags = CFTYPE_NOT_ON_ROOT,
11484 .read_s64 = cpu_idle_read_s64,
11485 .write_s64 = cpu_idle_write_s64,
11486 },
11487 #endif
11488 #ifdef CONFIG_CFS_BANDWIDTH
11489 {
11490 .name = "max",
11491 .flags = CFTYPE_NOT_ON_ROOT,
11492 .seq_show = cpu_max_show,
11493 .write = cpu_max_write,
11494 },
11495 {
11496 .name = "max.burst",
11497 .flags = CFTYPE_NOT_ON_ROOT,
11498 .read_u64 = cpu_cfs_burst_read_u64,
11499 .write_u64 = cpu_cfs_burst_write_u64,
11500 },
11501 #endif
11502 #ifdef CONFIG_UCLAMP_TASK_GROUP
11503 {
11504 .name = "uclamp.min",
11505 .flags = CFTYPE_NOT_ON_ROOT,
11506 .seq_show = cpu_uclamp_min_show,
11507 .write = cpu_uclamp_min_write,
11508 },
11509 {
11510 .name = "uclamp.max",
11511 .flags = CFTYPE_NOT_ON_ROOT,
11512 .seq_show = cpu_uclamp_max_show,
11513 .write = cpu_uclamp_max_write,
11514 },
11515 #endif
11516 { } /* terminate */
11517 };
11518
11519 struct cgroup_subsys cpu_cgrp_subsys = {
11520 .css_alloc = cpu_cgroup_css_alloc,
11521 .css_online = cpu_cgroup_css_online,
11522 .css_released = cpu_cgroup_css_released,
11523 .css_free = cpu_cgroup_css_free,
11524 .css_extra_stat_show = cpu_extra_stat_show,
11525 .css_local_stat_show = cpu_local_stat_show,
11526 #ifdef CONFIG_RT_GROUP_SCHED
11527 .can_attach = cpu_cgroup_can_attach,
11528 #endif
11529 .attach = cpu_cgroup_attach,
11530 .legacy_cftypes = cpu_legacy_files,
11531 .dfl_cftypes = cpu_files,
11532 .early_init = true,
11533 .threaded = true,
11534 };
11535
11536 #endif /* CONFIG_CGROUP_SCHED */
11537
11538 void dump_cpu_task(int cpu)
11539 {
11540 if (cpu == smp_processor_id() && in_hardirq()) {
11541 struct pt_regs *regs;
11542
11543 regs = get_irq_regs();
11544 if (regs) {
11545 show_regs(regs);
11546 return;
11547 }
11548 }
11549
11550 if (trigger_single_cpu_backtrace(cpu))
11551 return;
11552
11553 pr_info("Task dump for CPU %d:\n", cpu);
11554 sched_show_task(cpu_curr(cpu));
11555 }
11556
11557 /*
11558 * Nice levels are multiplicative, with a gentle 10% change for every
11559 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
11560 * nice 1, it will get ~10% less CPU time than another CPU-bound task
11561 * that remained on nice 0.
11562 *
11563 * The "10% effect" is relative and cumulative: from _any_ nice level,
11564 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
11565 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
11566 * If a task goes up by ~10% and another task goes down by ~10% then
11567 * the relative distance between them is ~25%.)
11568 */
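
/*
 * Worked example of the ~10% rule (illustrative): two CPU-bound tasks at
 * nice 0 and nice 1 have weights 1024 and 820, so they receive 1024/1844 ~=
 * 55.5% and 820/1844 ~= 44.5% of the CPU, roughly a 10 percentage point
 * spread, and each step in the table below differs from its neighbour by a
 * factor of about 1.25.
 */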
11569 const int sched_prio_to_weight[40] = {
11570 /* -20 */ 88761, 71755, 56483, 46273, 36291,
11571 /* -15 */ 29154, 23254, 18705, 14949, 11916,
11572 /* -10 */ 9548, 7620, 6100, 4904, 3906,
11573 /* -5 */ 3121, 2501, 1991, 1586, 1277,
11574 /* 0 */ 1024, 820, 655, 526, 423,
11575 /* 5 */ 335, 272, 215, 172, 137,
11576 /* 10 */ 110, 87, 70, 56, 45,
11577 /* 15 */ 36, 29, 23, 18, 15,
11578 };
11579
11580 /*
11581 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
11582 *
11583 * In cases where the weight does not change often, we can use the
11584 * precalculated inverse to speed up arithmetics by turning divisions
11585 * into multiplications:
11586 */
11587 const u32 sched_prio_to_wmult[40] = {
11588 /* -20 */ 48388, 59856, 76040, 92818, 118348,
11589 /* -15 */ 147320, 184698, 229616, 287308, 360437,
11590 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
11591 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
11592 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
11593 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
11594 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
11595 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
11596 };
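
/*
 * For example, the nice-0 entry above is 2^32 / 1024 = 4194304, so a divide
 * by the nice-0 weight can be replaced by a multiply and shift (sketch, not
 * taken from the original source):
 *
 *	quot = ((u64)delta * sched_prio_to_wmult[20]) >> 32;   (~= delta / 1024)
 */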
11597
11598 void call_trace_sched_update_nr_running(struct rq *rq, int count)
11599 {
11600 trace_sched_update_nr_running_tp(rq, count);
11601 }
11602
11603 #ifdef CONFIG_SCHED_MM_CID
11604
11605 /*
11606 * @cid_lock: Guarantee forward-progress of cid allocation.
11607 *
11608 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
11609 * is only used when contention is detected by the lock-free allocation so
11610 * forward progress can be guaranteed.
11611 */
11612 DEFINE_RAW_SPINLOCK(cid_lock);
11613
11614 /*
11615 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
11616 *
11617 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
11618 * detected, it is set to 1 to ensure that all newly coming allocations are
11619 * serialized by @cid_lock until the allocation which detected contention
11620 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
11621 * of a cid allocation.
11622 */
11623 int use_cid_lock;
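
/*
 * Rough shape of the fallback described above (assumed sketch; the real
 * allocator lives in the mm_cid helpers in sched.h): attempt the lock-free
 * bitmap scan first, and only when it repeatedly loses races, take cid_lock,
 * set use_cid_lock so concurrent allocators serialize behind the lock,
 * allocate, then clear use_cid_lock so later allocations return to the
 * lock-free path.
 */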
11624
11625 /*
11626 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
11627 * concurrently with respect to the execution of the source runqueue context
11628 * switch.
11629 *
11630 * There is one basic property we want to guarantee here:
11631 *
11632 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
11633 * used by a task. That would lead to concurrent allocation of the cid and
11634 * userspace corruption.
11635 *
11636 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
11637 * that a pair of loads observe at least one of a pair of stores, which can be
11638 * shown as:
11639 *
11640 * X = Y = 0
11641 *
11642 * w[X]=1 w[Y]=1
11643 * MB MB
11644 * r[Y]=y r[X]=x
11645 *
11646 * Which guarantees that x==0 && y==0 is impossible. But rather than using
11647 * values 0 and 1, this algorithm cares about specific state transitions of the
11648 * runqueue current task (as updated by the scheduler context switch), and the
11649 * per-mm/cpu cid value.
11650 *
11651 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
11652 * task->mm != mm for the rest of the discussion. There are two scheduler state
11653 * transitions on context switch we care about:
11654 *
11655 * (TSA) Store to rq->curr with transition from (N) to (Y)
11656 *
11657 * (TSB) Store to rq->curr with transition from (Y) to (N)
11658 *
11659 * On the remote-clear side, there is one transition we care about:
11660 *
11661 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
11662 *
11663 * There is also a transition to UNSET state which can be performed from all
11664 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
11665 * guarantees that only a single thread will succeed:
11666 *
11667 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
11668 *
11669 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
11670 * when a thread is actively using the cid (property (1)).
11671 *
11672 * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
11673 *
11674 * Scenario A) (TSA)+(TMA) (from next task perspective)
11675 *
11676 * CPU0 CPU1
11677 *
11678 * Context switch CS-1 Remote-clear
11679 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
11680 * (implied barrier after cmpxchg)
11681 * - switch_mm_cid()
11682 * - memory barrier (see switch_mm_cid()
11683 * comment explaining how this barrier
11684 * is combined with other scheduler
11685 * barriers)
11686 * - mm_cid_get (next)
11687 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
11688 *
11689 * This Dekker ensures that either task (Y) is observed by the
11690 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
11691 * observed.
11692 *
11693 * If task (Y) store is observed by rcu_dereference(), it means that there is
11694 * still an active task on the cpu. Remote-clear will therefore not transition
11695 * to UNSET, which fulfills property (1).
11696 *
11697 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
11698 * it will move its state to UNSET, which clears the percpu cid perhaps
11699 * uselessly (which is not an issue for correctness). Because task (Y) is not
11700 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
11701 * state to UNSET is done with a cmpxchg expecting that the old state has the
11702 * LAZY flag set, only one thread will successfully UNSET.
11703 *
11704 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
11705 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
11706 * CPU1 will observe task (Y) and do nothing more, which is fine.
11707 *
11708 * What we are effectively preventing with this Dekker is a scenario where
11709 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
11710 * because this would UNSET a cid which is actively used.
11711 */
11712
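/*
 * Minimal sketch of the Dekker pairing described above (illustration only,
 * using the generic barrier helpers; not lifted from the original file):
 *
 *	CPU0 (context switch)                 CPU1 (remote clear)
 *	WRITE_ONCE(rq->curr, next_Y);         cmpxchg(&pcpu_cid->cid, cid, LAZY);
 *	smp_mb();  (scheduler barrier)        (cmpxchg implies a full barrier)
 *	READ_ONCE(pcpu_cid->cid);             rcu_dereference(rq->curr);
 *
 * At least one of the two loads must observe the other side's store, so the
 * "neither LAZY nor task (Y) observed" outcome that would violate property
 * (1) cannot happen.
 */
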
11713 void sched_mm_cid_migrate_from(struct task_struct *t)
11714 {
11715 t->migrate_from_cpu = task_cpu(t);
11716 }
11717
11718 static
11719 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
11720 struct task_struct *t,
11721 struct mm_cid *src_pcpu_cid)
11722 {
11723 struct mm_struct *mm = t->mm;
11724 struct task_struct *src_task;
11725 int src_cid, last_mm_cid;
11726
11727 if (!mm)
11728 return -1;
11729
11730 last_mm_cid = t->last_mm_cid;
11731 /*
11732 * If the migrated task has no last cid, or if the current
11733 * task on src rq uses the cid, it means the source cid does not need
11734 * to be moved to the destination cpu.
11735 */
11736 if (last_mm_cid == -1)
11737 return -1;
11738 src_cid = READ_ONCE(src_pcpu_cid->cid);
11739 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
11740 return -1;
11741
11742 /*
11743 * If we observe an active task using the mm on this rq, it means we
11744 * are not the last task to be migrated from this cpu for this mm, so
11745 * there is no need to move src_cid to the destination cpu.
11746 */
11747 rcu_read_lock();
11748 src_task = rcu_dereference(src_rq->curr);
11749 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11750 rcu_read_unlock();
11751 t->last_mm_cid = -1;
11752 return -1;
11753 }
11754 rcu_read_unlock();
11755
11756 return src_cid;
11757 }
11758
11759 static
11760 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
11761 struct task_struct *t,
11762 struct mm_cid *src_pcpu_cid,
11763 int src_cid)
11764 {
11765 struct task_struct *src_task;
11766 struct mm_struct *mm = t->mm;
11767 int lazy_cid;
11768
11769 if (src_cid == -1)
11770 return -1;
11771
11772 /*
11773 * Attempt to clear the source cpu cid to move it to the destination
11774 * cpu.
11775 */
11776 lazy_cid = mm_cid_set_lazy_put(src_cid);
11777 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
11778 return -1;
11779
11780 /*
11781 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11782 * rq->curr->mm matches the scheduler barrier in context_switch()
11783 * between store to rq->curr and load of prev and next task's
11784 * per-mm/cpu cid.
11785 *
11786 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11787 * rq->curr->mm_cid_active matches the barrier in
11788 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
11789 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11790 * load of per-mm/cpu cid.
11791 */
11792
11793 /*
11794 * If we observe an active task using the mm on this rq after setting
11795 * the lazy-put flag, this task will be responsible for transitioning
11796 * from lazy-put flag set to MM_CID_UNSET.
11797 */
11798 rcu_read_lock();
11799 src_task = rcu_dereference(src_rq->curr);
11800 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11801 rcu_read_unlock();
11802 /*
11803 * We observed an active task for this mm, there is therefore
11804 * no point in moving this cid to the destination cpu.
11805 */
11806 t->last_mm_cid = -1;
11807 return -1;
11808 }
11809 rcu_read_unlock();
11810
11811 /*
11812 * The src_cid is unused, so it can be unset.
11813 */
11814 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11815 return -1;
11816 return src_cid;
11817 }
11818
11819 /*
11820 * Migration to dst cpu. Called with dst_rq lock held.
11821 * Interrupts are disabled, which keeps small the window during which the cid
11822 * is owned without holding the source rq lock.
11823 */
11824 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
11825 {
11826 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
11827 struct mm_struct *mm = t->mm;
11828 int src_cid, dst_cid, src_cpu;
11829 struct rq *src_rq;
11830
11831 lockdep_assert_rq_held(dst_rq);
11832
11833 if (!mm)
11834 return;
11835 src_cpu = t->migrate_from_cpu;
11836 if (src_cpu == -1) {
11837 t->last_mm_cid = -1;
11838 return;
11839 }
11840 /*
11841 * Move the src cid if the dst cid is unset. This keeps id
11842 * allocation closest to 0 in cases where few threads migrate around
11843 * many cpus.
11844 *
11845 * If destination cid is already set, we may have to just clear
11846 * the src cid to ensure compactness in frequent migrations
11847 * scenarios.
11848 *
11849 * It is not useful to clear the src cid when the number of threads is
11850 * greater or equal to the number of allowed cpus, because user-space
11851 * can expect that the number of allowed cids can reach the number of
11852 * allowed cpus.
11853 */
11854 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
11855 dst_cid = READ_ONCE(dst_pcpu_cid->cid);
11856 if (!mm_cid_is_unset(dst_cid) &&
11857 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
11858 return;
11859 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
11860 src_rq = cpu_rq(src_cpu);
11861 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
11862 if (src_cid == -1)
11863 return;
11864 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
11865 src_cid);
11866 if (src_cid == -1)
11867 return;
11868 if (!mm_cid_is_unset(dst_cid)) {
11869 __mm_cid_put(mm, src_cid);
11870 return;
11871 }
11872 /* Move src_cid to dst cpu. */
11873 mm_cid_snapshot_time(dst_rq, mm);
11874 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
11875 }
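
/*
 * Compactness example for the migration path above (illustrative): an mm
 * with 4 threads on a 64-CPU machine keeps its cids within {0..3}, because a
 * migrating thread drags its source cid to the destination cpu whenever the
 * destination slot is unset.  When the destination already has a cid and the
 * number of mm users reaches the number of allowed cpus, the extra clearing
 * is skipped since cids may then legitimately span the whole range.
 */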
11876
11877 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
11878 int cpu)
11879 {
11880 struct rq *rq = cpu_rq(cpu);
11881 struct task_struct *t;
11882 unsigned long flags;
11883 int cid, lazy_cid;
11884
11885 cid = READ_ONCE(pcpu_cid->cid);
11886 if (!mm_cid_is_valid(cid))
11887 return;
11888
11889 /*
11890 * Clear the cpu cid if it is set to keep cid allocation compact. If
11891 * there happen to be other tasks left on the source cpu using this
11892 * mm, the next task using this mm will reallocate its cid on context
11893 * switch.
11894 */
11895 lazy_cid = mm_cid_set_lazy_put(cid);
11896 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
11897 return;
11898
11899 /*
11900 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11901 * rq->curr->mm matches the scheduler barrier in context_switch()
11902 * between store to rq->curr and load of prev and next task's
11903 * per-mm/cpu cid.
11904 *
11905 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11906 * rq->curr->mm_cid_active matches the barrier in
11907 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
11908 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11909 * load of per-mm/cpu cid.
11910 */
11911
11912 /*
11913 * If we observe an active task using the mm on this rq after setting
11914 * the lazy-put flag, that task will be responsible for transitioning
11915 * from lazy-put flag set to MM_CID_UNSET.
11916 */
11917 rcu_read_lock();
11918 t = rcu_dereference(rq->curr);
11919 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
11920 rcu_read_unlock();
11921 return;
11922 }
11923 rcu_read_unlock();
11924
11925 /*
11926 * The cid is unused, so it can be unset.
11927 * Disable interrupts to keep the window of cid ownership without rq
11928 * lock small.
11929 */
11930 local_irq_save(flags);
11931 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11932 __mm_cid_put(mm, cid);
11933 local_irq_restore(flags);
11934 }
11935
11936 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
11937 {
11938 struct rq *rq = cpu_rq(cpu);
11939 struct mm_cid *pcpu_cid;
11940 struct task_struct *curr;
11941 u64 rq_clock;
11942
11943 /*
11944 * rq->clock load is racy on 32-bit but one spurious clear once in a
11945 * while is irrelevant.
11946 */
11947 rq_clock = READ_ONCE(rq->clock);
11948 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11949
11950 /*
11951 * In order to take care of infrequently scheduled tasks, bump the time
11952 * snapshot associated with this cid if an active task using the mm is
11953 * observed on this rq.
11954 */
11955 rcu_read_lock();
11956 curr = rcu_dereference(rq->curr);
11957 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
11958 WRITE_ONCE(pcpu_cid->time, rq_clock);
11959 rcu_read_unlock();
11960 return;
11961 }
11962 rcu_read_unlock();
11963
11964 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
11965 return;
11966 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
11967 }
11968
11969 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
11970 int weight)
11971 {
11972 struct mm_cid *pcpu_cid;
11973 int cid;
11974
11975 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11976 cid = READ_ONCE(pcpu_cid->cid);
11977 if (!mm_cid_is_valid(cid) || cid < weight)
11978 return;
11979 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
11980 }
11981
11982 static void task_mm_cid_work(struct callback_head *work)
11983 {
11984 unsigned long now = jiffies, old_scan, next_scan;
11985 struct task_struct *t = current;
11986 struct cpumask *cidmask;
11987 struct mm_struct *mm;
11988 int weight, cpu;
11989
11990 SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
11991
11992 work->next = work; /* Prevent double-add */
11993 if (t->flags & PF_EXITING)
11994 return;
11995 mm = t->mm;
11996 if (!mm)
11997 return;
11998 old_scan = READ_ONCE(mm->mm_cid_next_scan);
11999 next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
12000 if (!old_scan) {
12001 unsigned long res;
12002
12003 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
12004 if (res != old_scan)
12005 old_scan = res;
12006 else
12007 old_scan = next_scan;
12008 }
12009 if (time_before(now, old_scan))
12010 return;
12011 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
12012 return;
12013 cidmask = mm_cidmask(mm);
12014 /* Clear cids that were not recently used. */
12015 for_each_possible_cpu(cpu)
12016 sched_mm_cid_remote_clear_old(mm, cpu);
12017 weight = cpumask_weight(cidmask);
12018 /*
12019 * Clear cids that are greater or equal to the cidmask weight to
12020 * recompact it.
12021 */
12022 for_each_possible_cpu(cpu)
12023 sched_mm_cid_remote_clear_weight(mm, cpu, weight);
12024 }
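
/*
 * Example of the compaction pass above (illustration only): if only cids 0
 * and 1 are currently marked in the mm's cidmask (weight == 2), any per-cpu
 * slot still holding a cid >= 2 from an earlier, busier phase is cleared by
 * sched_mm_cid_remote_clear_weight(), so future allocations stay packed at
 * the low end of the range.
 */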
12025
12026 void init_sched_mm_cid(struct task_struct *t)
12027 {
12028 struct mm_struct *mm = t->mm;
12029 int mm_users = 0;
12030
12031 if (mm) {
12032 mm_users = atomic_read(&mm->mm_users);
12033 if (mm_users == 1)
12034 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
12035 }
12036 t->cid_work.next = &t->cid_work; /* Protect against double add */
12037 init_task_work(&t->cid_work, task_mm_cid_work);
12038 }
12039
12040 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
12041 {
12042 struct callback_head *work = &curr->cid_work;
12043 unsigned long now = jiffies;
12044
12045 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
12046 work->next != work)
12047 return;
12048 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
12049 return;
12050
12051 /* No page allocation under rq lock */
12052 task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
12053 }
12054
12055 void sched_mm_cid_exit_signals(struct task_struct *t)
12056 {
12057 struct mm_struct *mm = t->mm;
12058 struct rq_flags rf;
12059 struct rq *rq;
12060
12061 if (!mm)
12062 return;
12063
12064 preempt_disable();
12065 rq = this_rq();
12066 rq_lock_irqsave(rq, &rf);
12067 preempt_enable_no_resched(); /* holding spinlock */
12068 WRITE_ONCE(t->mm_cid_active, 0);
12069 /*
12070 * Store t->mm_cid_active before loading per-mm/cpu cid.
12071 * Matches barrier in sched_mm_cid_remote_clear_old().
12072 */
12073 smp_mb();
12074 mm_cid_put(mm);
12075 t->last_mm_cid = t->mm_cid = -1;
12076 rq_unlock_irqrestore(rq, &rf);
12077 }
12078
12079 void sched_mm_cid_before_execve(struct task_struct *t)
12080 {
12081 struct mm_struct *mm = t->mm;
12082 struct rq_flags rf;
12083 struct rq *rq;
12084
12085 if (!mm)
12086 return;
12087
12088 preempt_disable();
12089 rq = this_rq();
12090 rq_lock_irqsave(rq, &rf);
12091 preempt_enable_no_resched(); /* holding spinlock */
12092 WRITE_ONCE(t->mm_cid_active, 0);
12093 /*
12094 * Store t->mm_cid_active before loading per-mm/cpu cid.
12095 * Matches barrier in sched_mm_cid_remote_clear_old().
12096 */
12097 smp_mb();
12098 mm_cid_put(mm);
12099 t->last_mm_cid = t->mm_cid = -1;
12100 rq_unlock_irqrestore(rq, &rf);
12101 }
12102
12103 void sched_mm_cid_after_execve(struct task_struct *t)
12104 {
12105 struct mm_struct *mm = t->mm;
12106 struct rq_flags rf;
12107 struct rq *rq;
12108
12109 if (!mm)
12110 return;
12111
12112 preempt_disable();
12113 rq = this_rq();
12114 rq_lock_irqsave(rq, &rf);
12115 preempt_enable_no_resched(); /* holding spinlock */
12116 WRITE_ONCE(t->mm_cid_active, 1);
12117 /*
12118 * Store t->mm_cid_active before loading per-mm/cpu cid.
12119 * Matches barrier in sched_mm_cid_remote_clear_old().
12120 */
12121 smp_mb();
12122 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
12123 rq_unlock_irqrestore(rq, &rf);
12124 rseq_set_notify_resume(t);
12125 }
12126
12127 void sched_mm_cid_fork(struct task_struct *t)
12128 {
12129 WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
12130 t->mm_cid_active = 1;
12131 }
12132 #endif
12133