xref: /openbmc/linux/kernel/sched/core.c (revision 840d9a813c8eaa5c55d86525e374a97ca5023b53)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *  kernel/sched/core.c
4   *
5   *  Core kernel scheduler code and related syscalls
6   *
7   *  Copyright (C) 1991-2002  Linus Torvalds
8   */
9  #include <linux/highmem.h>
10  #include <linux/hrtimer_api.h>
11  #include <linux/ktime_api.h>
12  #include <linux/sched/signal.h>
13  #include <linux/syscalls_api.h>
14  #include <linux/debug_locks.h>
15  #include <linux/prefetch.h>
16  #include <linux/capability.h>
17  #include <linux/pgtable_api.h>
18  #include <linux/wait_bit.h>
19  #include <linux/jiffies.h>
20  #include <linux/spinlock_api.h>
21  #include <linux/cpumask_api.h>
22  #include <linux/lockdep_api.h>
23  #include <linux/hardirq.h>
24  #include <linux/softirq.h>
25  #include <linux/refcount_api.h>
26  #include <linux/topology.h>
27  #include <linux/sched/clock.h>
28  #include <linux/sched/cond_resched.h>
29  #include <linux/sched/cputime.h>
30  #include <linux/sched/debug.h>
31  #include <linux/sched/hotplug.h>
32  #include <linux/sched/init.h>
33  #include <linux/sched/isolation.h>
34  #include <linux/sched/loadavg.h>
35  #include <linux/sched/mm.h>
36  #include <linux/sched/nohz.h>
37  #include <linux/sched/rseq_api.h>
38  #include <linux/sched/rt.h>
39  
40  #include <linux/blkdev.h>
41  #include <linux/context_tracking.h>
42  #include <linux/cpuset.h>
43  #include <linux/delayacct.h>
44  #include <linux/init_task.h>
45  #include <linux/interrupt.h>
46  #include <linux/ioprio.h>
47  #include <linux/kallsyms.h>
48  #include <linux/kcov.h>
49  #include <linux/kprobes.h>
50  #include <linux/llist_api.h>
51  #include <linux/mmu_context.h>
52  #include <linux/mmzone.h>
53  #include <linux/mutex_api.h>
54  #include <linux/nmi.h>
55  #include <linux/nospec.h>
56  #include <linux/perf_event_api.h>
57  #include <linux/profile.h>
58  #include <linux/psi.h>
59  #include <linux/rcuwait_api.h>
60  #include <linux/sched/wake_q.h>
61  #include <linux/scs.h>
62  #include <linux/slab.h>
63  #include <linux/syscalls.h>
64  #include <linux/vtime.h>
65  #include <linux/wait_api.h>
66  #include <linux/workqueue_api.h>
67  
68  #ifdef CONFIG_PREEMPT_DYNAMIC
69  # ifdef CONFIG_GENERIC_ENTRY
70  #  include <linux/entry-common.h>
71  # endif
72  #endif
73  
74  #include <uapi/linux/sched/types.h>
75  
76  #include <asm/irq_regs.h>
77  #include <asm/switch_to.h>
78  #include <asm/tlb.h>
79  
80  #define CREATE_TRACE_POINTS
81  #include <linux/sched/rseq_api.h>
82  #include <trace/events/sched.h>
83  #include <trace/events/ipi.h>
84  #undef CREATE_TRACE_POINTS
85  
86  #include "sched.h"
87  #include "stats.h"
88  #include "autogroup.h"
89  
90  #include "autogroup.h"
91  #include "pelt.h"
92  #include "smp.h"
93  #include "stats.h"
94  
95  #include "../workqueue_internal.h"
96  #include "../../io_uring/io-wq.h"
97  #include "../smpboot.h"
98  
99  EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
100  EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
101  
102  /*
103   * Export tracepoints that act as a bare tracehook (ie: have no trace event
104   * associated with them) to allow external modules to probe them.
105   */
106  EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
107  EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
108  EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
109  EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
110  EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
111  EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
112  EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
113  EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
114  EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
115  EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
116  EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
117  
118  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
119  
120  #ifdef CONFIG_SCHED_DEBUG
121  /*
122   * Debugging: various feature bits
123   *
124   * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
125   * sysctl_sched_features, defined in sched.h, to allow constants propagation
126   * at compile time and compiler optimization based on features default.
127   */
128  #define SCHED_FEAT(name, enabled)	\
129  	(1UL << __SCHED_FEAT_##name) * enabled |
130  const_debug unsigned int sysctl_sched_features =
131  #include "features.h"
132  	0;
133  #undef SCHED_FEAT
134  
135  /*
136   * Print a warning if need_resched is set for the given duration (if
137   * LATENCY_WARN is enabled).
138   *
139   * If sysctl_resched_latency_warn_once is set, only one warning will be shown
140   * per boot.
141   */
142  __read_mostly int sysctl_resched_latency_warn_ms = 100;
143  __read_mostly int sysctl_resched_latency_warn_once = 1;
144  #endif /* CONFIG_SCHED_DEBUG */
145  
146  /*
147   * Number of tasks to iterate in a single balance run.
148   * Limited because this is done with IRQs disabled.
149   */
150  const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
151  
152  __read_mostly int scheduler_running;
153  
154  #ifdef CONFIG_SCHED_CORE
155  
156  DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
157  
158  /* kernel prio, less is more */
__task_prio(const struct task_struct * p)159  static inline int __task_prio(const struct task_struct *p)
160  {
161  	if (p->sched_class == &stop_sched_class) /* trumps deadline */
162  		return -2;
163  
164  	if (rt_prio(p->prio)) /* includes deadline */
165  		return p->prio; /* [-1, 99] */
166  
167  	if (p->sched_class == &idle_sched_class)
168  		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
169  
170  	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
171  }
172  
173  /*
174   * l(a,b)
175   * le(a,b) := !l(b,a)
176   * g(a,b)  := l(b,a)
177   * ge(a,b) := !l(a,b)
178   */
179  
180  /* real prio, less is less */
prio_less(const struct task_struct * a,const struct task_struct * b,bool in_fi)181  static inline bool prio_less(const struct task_struct *a,
182  			     const struct task_struct *b, bool in_fi)
183  {
184  
185  	int pa = __task_prio(a), pb = __task_prio(b);
186  
187  	if (-pa < -pb)
188  		return true;
189  
190  	if (-pb < -pa)
191  		return false;
192  
193  	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
194  		return !dl_time_before(a->dl.deadline, b->dl.deadline);
195  
196  	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
197  		return cfs_prio_less(a, b, in_fi);
198  
199  	return false;
200  }
201  
__sched_core_less(const struct task_struct * a,const struct task_struct * b)202  static inline bool __sched_core_less(const struct task_struct *a,
203  				     const struct task_struct *b)
204  {
205  	if (a->core_cookie < b->core_cookie)
206  		return true;
207  
208  	if (a->core_cookie > b->core_cookie)
209  		return false;
210  
211  	/* flip prio, so high prio is leftmost */
212  	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
213  		return true;
214  
215  	return false;
216  }
217  
218  #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
219  
rb_sched_core_less(struct rb_node * a,const struct rb_node * b)220  static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
221  {
222  	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
223  }
224  
rb_sched_core_cmp(const void * key,const struct rb_node * node)225  static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
226  {
227  	const struct task_struct *p = __node_2_sc(node);
228  	unsigned long cookie = (unsigned long)key;
229  
230  	if (cookie < p->core_cookie)
231  		return -1;
232  
233  	if (cookie > p->core_cookie)
234  		return 1;
235  
236  	return 0;
237  }
238  
sched_core_enqueue(struct rq * rq,struct task_struct * p)239  void sched_core_enqueue(struct rq *rq, struct task_struct *p)
240  {
241  	rq->core->core_task_seq++;
242  
243  	if (!p->core_cookie)
244  		return;
245  
246  	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
247  }
248  
sched_core_dequeue(struct rq * rq,struct task_struct * p,int flags)249  void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
250  {
251  	rq->core->core_task_seq++;
252  
253  	if (sched_core_enqueued(p)) {
254  		rb_erase(&p->core_node, &rq->core_tree);
255  		RB_CLEAR_NODE(&p->core_node);
256  	}
257  
258  	/*
259  	 * Migrating the last task off the cpu, with the cpu in forced idle
260  	 * state. Reschedule to create an accounting edge for forced idle,
261  	 * and re-examine whether the core is still in forced idle state.
262  	 */
263  	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
264  	    rq->core->core_forceidle_count && rq->curr == rq->idle)
265  		resched_curr(rq);
266  }
267  
sched_task_is_throttled(struct task_struct * p,int cpu)268  static int sched_task_is_throttled(struct task_struct *p, int cpu)
269  {
270  	if (p->sched_class->task_is_throttled)
271  		return p->sched_class->task_is_throttled(p, cpu);
272  
273  	return 0;
274  }
275  
sched_core_next(struct task_struct * p,unsigned long cookie)276  static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
277  {
278  	struct rb_node *node = &p->core_node;
279  	int cpu = task_cpu(p);
280  
281  	do {
282  		node = rb_next(node);
283  		if (!node)
284  			return NULL;
285  
286  		p = __node_2_sc(node);
287  		if (p->core_cookie != cookie)
288  			return NULL;
289  
290  	} while (sched_task_is_throttled(p, cpu));
291  
292  	return p;
293  }
294  
295  /*
296   * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
297   * If no suitable task is found, NULL will be returned.
298   */
sched_core_find(struct rq * rq,unsigned long cookie)299  static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
300  {
301  	struct task_struct *p;
302  	struct rb_node *node;
303  
304  	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
305  	if (!node)
306  		return NULL;
307  
308  	p = __node_2_sc(node);
309  	if (!sched_task_is_throttled(p, rq->cpu))
310  		return p;
311  
312  	return sched_core_next(p, cookie);
313  }
314  
315  /*
316   * Magic required such that:
317   *
318   *	raw_spin_rq_lock(rq);
319   *	...
320   *	raw_spin_rq_unlock(rq);
321   *
322   * ends up locking and unlocking the _same_ lock, and all CPUs
323   * always agree on what rq has what lock.
324   *
325   * XXX entirely possible to selectively enable cores, don't bother for now.
326   */
327  
328  static DEFINE_MUTEX(sched_core_mutex);
329  static atomic_t sched_core_count;
330  static struct cpumask sched_core_mask;
331  
sched_core_lock(int cpu,unsigned long * flags)332  static void sched_core_lock(int cpu, unsigned long *flags)
333  {
334  	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
335  	int t, i = 0;
336  
337  	local_irq_save(*flags);
338  	for_each_cpu(t, smt_mask)
339  		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
340  }
341  
sched_core_unlock(int cpu,unsigned long * flags)342  static void sched_core_unlock(int cpu, unsigned long *flags)
343  {
344  	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
345  	int t;
346  
347  	for_each_cpu(t, smt_mask)
348  		raw_spin_unlock(&cpu_rq(t)->__lock);
349  	local_irq_restore(*flags);
350  }
351  
__sched_core_flip(bool enabled)352  static void __sched_core_flip(bool enabled)
353  {
354  	unsigned long flags;
355  	int cpu, t;
356  
357  	cpus_read_lock();
358  
359  	/*
360  	 * Toggle the online cores, one by one.
361  	 */
362  	cpumask_copy(&sched_core_mask, cpu_online_mask);
363  	for_each_cpu(cpu, &sched_core_mask) {
364  		const struct cpumask *smt_mask = cpu_smt_mask(cpu);
365  
366  		sched_core_lock(cpu, &flags);
367  
368  		for_each_cpu(t, smt_mask)
369  			cpu_rq(t)->core_enabled = enabled;
370  
371  		cpu_rq(cpu)->core->core_forceidle_start = 0;
372  
373  		sched_core_unlock(cpu, &flags);
374  
375  		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
376  	}
377  
378  	/*
379  	 * Toggle the offline CPUs.
380  	 */
381  	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
382  		cpu_rq(cpu)->core_enabled = enabled;
383  
384  	cpus_read_unlock();
385  }
386  
sched_core_assert_empty(void)387  static void sched_core_assert_empty(void)
388  {
389  	int cpu;
390  
391  	for_each_possible_cpu(cpu)
392  		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
393  }
394  
__sched_core_enable(void)395  static void __sched_core_enable(void)
396  {
397  	static_branch_enable(&__sched_core_enabled);
398  	/*
399  	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
400  	 * and future ones will observe !sched_core_disabled().
401  	 */
402  	synchronize_rcu();
403  	__sched_core_flip(true);
404  	sched_core_assert_empty();
405  }
406  
__sched_core_disable(void)407  static void __sched_core_disable(void)
408  {
409  	sched_core_assert_empty();
410  	__sched_core_flip(false);
411  	static_branch_disable(&__sched_core_enabled);
412  }
413  
sched_core_get(void)414  void sched_core_get(void)
415  {
416  	if (atomic_inc_not_zero(&sched_core_count))
417  		return;
418  
419  	mutex_lock(&sched_core_mutex);
420  	if (!atomic_read(&sched_core_count))
421  		__sched_core_enable();
422  
423  	smp_mb__before_atomic();
424  	atomic_inc(&sched_core_count);
425  	mutex_unlock(&sched_core_mutex);
426  }
427  
__sched_core_put(struct work_struct * work)428  static void __sched_core_put(struct work_struct *work)
429  {
430  	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
431  		__sched_core_disable();
432  		mutex_unlock(&sched_core_mutex);
433  	}
434  }
435  
sched_core_put(void)436  void sched_core_put(void)
437  {
438  	static DECLARE_WORK(_work, __sched_core_put);
439  
440  	/*
441  	 * "There can be only one"
442  	 *
443  	 * Either this is the last one, or we don't actually need to do any
444  	 * 'work'. If it is the last *again*, we rely on
445  	 * WORK_STRUCT_PENDING_BIT.
446  	 */
447  	if (!atomic_add_unless(&sched_core_count, -1, 1))
448  		schedule_work(&_work);
449  }
450  
451  #else /* !CONFIG_SCHED_CORE */
452  
sched_core_enqueue(struct rq * rq,struct task_struct * p)453  static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
454  static inline void
sched_core_dequeue(struct rq * rq,struct task_struct * p,int flags)455  sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
456  
457  #endif /* CONFIG_SCHED_CORE */
458  
459  /*
460   * Serialization rules:
461   *
462   * Lock order:
463   *
464   *   p->pi_lock
465   *     rq->lock
466   *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
467   *
468   *  rq1->lock
469   *    rq2->lock  where: rq1 < rq2
470   *
471   * Regular state:
472   *
473   * Normal scheduling state is serialized by rq->lock. __schedule() takes the
474   * local CPU's rq->lock, it optionally removes the task from the runqueue and
475   * always looks at the local rq data structures to find the most eligible task
476   * to run next.
477   *
478   * Task enqueue is also under rq->lock, possibly taken from another CPU.
479   * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
480   * the local CPU to avoid bouncing the runqueue state around [ see
481   * ttwu_queue_wakelist() ]
482   *
483   * Task wakeup, specifically wakeups that involve migration, are horribly
484   * complicated to avoid having to take two rq->locks.
485   *
486   * Special state:
487   *
488   * System-calls and anything external will use task_rq_lock() which acquires
489   * both p->pi_lock and rq->lock. As a consequence the state they change is
490   * stable while holding either lock:
491   *
492   *  - sched_setaffinity()/
493   *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
494   *  - set_user_nice():		p->se.load, p->*prio
495   *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
496   *				p->se.load, p->rt_priority,
497   *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
498   *  - sched_setnuma():		p->numa_preferred_nid
499   *  - sched_move_task():	p->sched_task_group
500   *  - uclamp_update_active()	p->uclamp*
501   *
502   * p->state <- TASK_*:
503   *
504   *   is changed locklessly using set_current_state(), __set_current_state() or
505   *   set_special_state(), see their respective comments, or by
506   *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
507   *   concurrent self.
508   *
509   * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
510   *
511   *   is set by activate_task() and cleared by deactivate_task(), under
512   *   rq->lock. Non-zero indicates the task is runnable, the special
513   *   ON_RQ_MIGRATING state is used for migration without holding both
514   *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
515   *
516   * p->on_cpu <- { 0, 1 }:
517   *
518   *   is set by prepare_task() and cleared by finish_task() such that it will be
519   *   set before p is scheduled-in and cleared after p is scheduled-out, both
520   *   under rq->lock. Non-zero indicates the task is running on its CPU.
521   *
522   *   [ The astute reader will observe that it is possible for two tasks on one
523   *     CPU to have ->on_cpu = 1 at the same time. ]
524   *
525   * task_cpu(p): is changed by set_task_cpu(), the rules are:
526   *
527   *  - Don't call set_task_cpu() on a blocked task:
528   *
529   *    We don't care what CPU we're not running on, this simplifies hotplug,
530   *    the CPU assignment of blocked tasks isn't required to be valid.
531   *
532   *  - for try_to_wake_up(), called under p->pi_lock:
533   *
534   *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
535   *
536   *  - for migration called under rq->lock:
537   *    [ see task_on_rq_migrating() in task_rq_lock() ]
538   *
539   *    o move_queued_task()
540   *    o detach_task()
541   *
542   *  - for migration called under double_rq_lock():
543   *
544   *    o __migrate_swap_task()
545   *    o push_rt_task() / pull_rt_task()
546   *    o push_dl_task() / pull_dl_task()
547   *    o dl_task_offline_migration()
548   *
549   */
550  
raw_spin_rq_lock_nested(struct rq * rq,int subclass)551  void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
552  {
553  	raw_spinlock_t *lock;
554  
555  	/* Matches synchronize_rcu() in __sched_core_enable() */
556  	preempt_disable();
557  	if (sched_core_disabled()) {
558  		raw_spin_lock_nested(&rq->__lock, subclass);
559  		/* preempt_count *MUST* be > 1 */
560  		preempt_enable_no_resched();
561  		return;
562  	}
563  
564  	for (;;) {
565  		lock = __rq_lockp(rq);
566  		raw_spin_lock_nested(lock, subclass);
567  		if (likely(lock == __rq_lockp(rq))) {
568  			/* preempt_count *MUST* be > 1 */
569  			preempt_enable_no_resched();
570  			return;
571  		}
572  		raw_spin_unlock(lock);
573  	}
574  }
575  
raw_spin_rq_trylock(struct rq * rq)576  bool raw_spin_rq_trylock(struct rq *rq)
577  {
578  	raw_spinlock_t *lock;
579  	bool ret;
580  
581  	/* Matches synchronize_rcu() in __sched_core_enable() */
582  	preempt_disable();
583  	if (sched_core_disabled()) {
584  		ret = raw_spin_trylock(&rq->__lock);
585  		preempt_enable();
586  		return ret;
587  	}
588  
589  	for (;;) {
590  		lock = __rq_lockp(rq);
591  		ret = raw_spin_trylock(lock);
592  		if (!ret || (likely(lock == __rq_lockp(rq)))) {
593  			preempt_enable();
594  			return ret;
595  		}
596  		raw_spin_unlock(lock);
597  	}
598  }
599  
raw_spin_rq_unlock(struct rq * rq)600  void raw_spin_rq_unlock(struct rq *rq)
601  {
602  	raw_spin_unlock(rq_lockp(rq));
603  }
604  
605  #ifdef CONFIG_SMP
606  /*
607   * double_rq_lock - safely lock two runqueues
608   */
double_rq_lock(struct rq * rq1,struct rq * rq2)609  void double_rq_lock(struct rq *rq1, struct rq *rq2)
610  {
611  	lockdep_assert_irqs_disabled();
612  
613  	if (rq_order_less(rq2, rq1))
614  		swap(rq1, rq2);
615  
616  	raw_spin_rq_lock(rq1);
617  	if (__rq_lockp(rq1) != __rq_lockp(rq2))
618  		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
619  
620  	double_rq_clock_clear_update(rq1, rq2);
621  }
622  #endif
623  
624  /*
625   * __task_rq_lock - lock the rq @p resides on.
626   */
__task_rq_lock(struct task_struct * p,struct rq_flags * rf)627  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
628  	__acquires(rq->lock)
629  {
630  	struct rq *rq;
631  
632  	lockdep_assert_held(&p->pi_lock);
633  
634  	for (;;) {
635  		rq = task_rq(p);
636  		raw_spin_rq_lock(rq);
637  		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
638  			rq_pin_lock(rq, rf);
639  			return rq;
640  		}
641  		raw_spin_rq_unlock(rq);
642  
643  		while (unlikely(task_on_rq_migrating(p)))
644  			cpu_relax();
645  	}
646  }
647  
648  /*
649   * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
650   */
task_rq_lock(struct task_struct * p,struct rq_flags * rf)651  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
652  	__acquires(p->pi_lock)
653  	__acquires(rq->lock)
654  {
655  	struct rq *rq;
656  
657  	for (;;) {
658  		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
659  		rq = task_rq(p);
660  		raw_spin_rq_lock(rq);
661  		/*
662  		 *	move_queued_task()		task_rq_lock()
663  		 *
664  		 *	ACQUIRE (rq->lock)
665  		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
666  		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
667  		 *	[S] ->cpu = new_cpu		[L] task_rq()
668  		 *					[L] ->on_rq
669  		 *	RELEASE (rq->lock)
670  		 *
671  		 * If we observe the old CPU in task_rq_lock(), the acquire of
672  		 * the old rq->lock will fully serialize against the stores.
673  		 *
674  		 * If we observe the new CPU in task_rq_lock(), the address
675  		 * dependency headed by '[L] rq = task_rq()' and the acquire
676  		 * will pair with the WMB to ensure we then also see migrating.
677  		 */
678  		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
679  			rq_pin_lock(rq, rf);
680  			return rq;
681  		}
682  		raw_spin_rq_unlock(rq);
683  		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
684  
685  		while (unlikely(task_on_rq_migrating(p)))
686  			cpu_relax();
687  	}
688  }
689  
690  /*
691   * RQ-clock updating methods:
692   */
693  
update_rq_clock_task(struct rq * rq,s64 delta)694  static void update_rq_clock_task(struct rq *rq, s64 delta)
695  {
696  /*
697   * In theory, the compile should just see 0 here, and optimize out the call
698   * to sched_rt_avg_update. But I don't trust it...
699   */
700  	s64 __maybe_unused steal = 0, irq_delta = 0;
701  
702  #ifdef CONFIG_IRQ_TIME_ACCOUNTING
703  	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
704  
705  	/*
706  	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
707  	 * this case when a previous update_rq_clock() happened inside a
708  	 * {soft,}irq region.
709  	 *
710  	 * When this happens, we stop ->clock_task and only update the
711  	 * prev_irq_time stamp to account for the part that fit, so that a next
712  	 * update will consume the rest. This ensures ->clock_task is
713  	 * monotonic.
714  	 *
715  	 * It does however cause some slight miss-attribution of {soft,}irq
716  	 * time, a more accurate solution would be to update the irq_time using
717  	 * the current rq->clock timestamp, except that would require using
718  	 * atomic ops.
719  	 */
720  	if (irq_delta > delta)
721  		irq_delta = delta;
722  
723  	rq->prev_irq_time += irq_delta;
724  	delta -= irq_delta;
725  	delayacct_irq(rq->curr, irq_delta);
726  #endif
727  #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
728  	if (static_key_false((&paravirt_steal_rq_enabled))) {
729  		u64 prev_steal;
730  
731  		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
732  		steal -= rq->prev_steal_time_rq;
733  
734  		if (unlikely(steal > delta))
735  			steal = delta;
736  
737  		rq->prev_steal_time_rq = prev_steal;
738  		delta -= steal;
739  	}
740  #endif
741  
742  	rq->clock_task += delta;
743  
744  #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
745  	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
746  		update_irq_load_avg(rq, irq_delta + steal);
747  #endif
748  	update_rq_clock_pelt(rq, delta);
749  }
750  
update_rq_clock(struct rq * rq)751  void update_rq_clock(struct rq *rq)
752  {
753  	s64 delta;
754  
755  	lockdep_assert_rq_held(rq);
756  
757  	if (rq->clock_update_flags & RQCF_ACT_SKIP)
758  		return;
759  
760  #ifdef CONFIG_SCHED_DEBUG
761  	if (sched_feat(WARN_DOUBLE_CLOCK))
762  		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
763  	rq->clock_update_flags |= RQCF_UPDATED;
764  #endif
765  
766  	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
767  	if (delta < 0)
768  		return;
769  	rq->clock += delta;
770  	update_rq_clock_task(rq, delta);
771  }
772  
773  #ifdef CONFIG_SCHED_HRTICK
774  /*
775   * Use HR-timers to deliver accurate preemption points.
776   */
777  
hrtick_clear(struct rq * rq)778  static void hrtick_clear(struct rq *rq)
779  {
780  	if (hrtimer_active(&rq->hrtick_timer))
781  		hrtimer_cancel(&rq->hrtick_timer);
782  }
783  
784  /*
785   * High-resolution timer tick.
786   * Runs from hardirq context with interrupts disabled.
787   */
hrtick(struct hrtimer * timer)788  static enum hrtimer_restart hrtick(struct hrtimer *timer)
789  {
790  	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
791  	struct rq_flags rf;
792  
793  	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
794  
795  	rq_lock(rq, &rf);
796  	update_rq_clock(rq);
797  	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
798  	rq_unlock(rq, &rf);
799  
800  	return HRTIMER_NORESTART;
801  }
802  
803  #ifdef CONFIG_SMP
804  
__hrtick_restart(struct rq * rq)805  static void __hrtick_restart(struct rq *rq)
806  {
807  	struct hrtimer *timer = &rq->hrtick_timer;
808  	ktime_t time = rq->hrtick_time;
809  
810  	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
811  }
812  
813  /*
814   * called from hardirq (IPI) context
815   */
__hrtick_start(void * arg)816  static void __hrtick_start(void *arg)
817  {
818  	struct rq *rq = arg;
819  	struct rq_flags rf;
820  
821  	rq_lock(rq, &rf);
822  	__hrtick_restart(rq);
823  	rq_unlock(rq, &rf);
824  }
825  
826  /*
827   * Called to set the hrtick timer state.
828   *
829   * called with rq->lock held and irqs disabled
830   */
hrtick_start(struct rq * rq,u64 delay)831  void hrtick_start(struct rq *rq, u64 delay)
832  {
833  	struct hrtimer *timer = &rq->hrtick_timer;
834  	s64 delta;
835  
836  	/*
837  	 * Don't schedule slices shorter than 10000ns, that just
838  	 * doesn't make sense and can cause timer DoS.
839  	 */
840  	delta = max_t(s64, delay, 10000LL);
841  	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
842  
843  	if (rq == this_rq())
844  		__hrtick_restart(rq);
845  	else
846  		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
847  }
848  
849  #else
850  /*
851   * Called to set the hrtick timer state.
852   *
853   * called with rq->lock held and irqs disabled
854   */
hrtick_start(struct rq * rq,u64 delay)855  void hrtick_start(struct rq *rq, u64 delay)
856  {
857  	/*
858  	 * Don't schedule slices shorter than 10000ns, that just
859  	 * doesn't make sense. Rely on vruntime for fairness.
860  	 */
861  	delay = max_t(u64, delay, 10000LL);
862  	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
863  		      HRTIMER_MODE_REL_PINNED_HARD);
864  }
865  
866  #endif /* CONFIG_SMP */
867  
hrtick_rq_init(struct rq * rq)868  static void hrtick_rq_init(struct rq *rq)
869  {
870  #ifdef CONFIG_SMP
871  	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
872  #endif
873  	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
874  	rq->hrtick_timer.function = hrtick;
875  }
876  #else	/* CONFIG_SCHED_HRTICK */
hrtick_clear(struct rq * rq)877  static inline void hrtick_clear(struct rq *rq)
878  {
879  }
880  
hrtick_rq_init(struct rq * rq)881  static inline void hrtick_rq_init(struct rq *rq)
882  {
883  }
884  #endif	/* CONFIG_SCHED_HRTICK */
885  
886  /*
887   * cmpxchg based fetch_or, macro so it works for different integer types
888   */
889  #define fetch_or(ptr, mask)						\
890  	({								\
891  		typeof(ptr) _ptr = (ptr);				\
892  		typeof(mask) _mask = (mask);				\
893  		typeof(*_ptr) _val = *_ptr;				\
894  									\
895  		do {							\
896  		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
897  	_val;								\
898  })
899  
900  #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
901  /*
902   * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
903   * this avoids any races wrt polling state changes and thereby avoids
904   * spurious IPIs.
905   */
set_nr_and_not_polling(struct task_struct * p)906  static inline bool set_nr_and_not_polling(struct task_struct *p)
907  {
908  	struct thread_info *ti = task_thread_info(p);
909  	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
910  }
911  
912  /*
913   * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
914   *
915   * If this returns true, then the idle task promises to call
916   * sched_ttwu_pending() and reschedule soon.
917   */
set_nr_if_polling(struct task_struct * p)918  static bool set_nr_if_polling(struct task_struct *p)
919  {
920  	struct thread_info *ti = task_thread_info(p);
921  	typeof(ti->flags) val = READ_ONCE(ti->flags);
922  
923  	for (;;) {
924  		if (!(val & _TIF_POLLING_NRFLAG))
925  			return false;
926  		if (val & _TIF_NEED_RESCHED)
927  			return true;
928  		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
929  			break;
930  	}
931  	return true;
932  }
933  
934  #else
set_nr_and_not_polling(struct task_struct * p)935  static inline bool set_nr_and_not_polling(struct task_struct *p)
936  {
937  	set_tsk_need_resched(p);
938  	return true;
939  }
940  
941  #ifdef CONFIG_SMP
set_nr_if_polling(struct task_struct * p)942  static inline bool set_nr_if_polling(struct task_struct *p)
943  {
944  	return false;
945  }
946  #endif
947  #endif
948  
__wake_q_add(struct wake_q_head * head,struct task_struct * task)949  static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
950  {
951  	struct wake_q_node *node = &task->wake_q;
952  
953  	/*
954  	 * Atomically grab the task, if ->wake_q is !nil already it means
955  	 * it's already queued (either by us or someone else) and will get the
956  	 * wakeup due to that.
957  	 *
958  	 * In order to ensure that a pending wakeup will observe our pending
959  	 * state, even in the failed case, an explicit smp_mb() must be used.
960  	 */
961  	smp_mb__before_atomic();
962  	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
963  		return false;
964  
965  	/*
966  	 * The head is context local, there can be no concurrency.
967  	 */
968  	*head->lastp = node;
969  	head->lastp = &node->next;
970  	return true;
971  }
972  
973  /**
974   * wake_q_add() - queue a wakeup for 'later' waking.
975   * @head: the wake_q_head to add @task to
976   * @task: the task to queue for 'later' wakeup
977   *
978   * Queue a task for later wakeup, most likely by the wake_up_q() call in the
979   * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
980   * instantly.
981   *
982   * This function must be used as-if it were wake_up_process(); IOW the task
983   * must be ready to be woken at this location.
984   */
wake_q_add(struct wake_q_head * head,struct task_struct * task)985  void wake_q_add(struct wake_q_head *head, struct task_struct *task)
986  {
987  	if (__wake_q_add(head, task))
988  		get_task_struct(task);
989  }
990  
991  /**
992   * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
993   * @head: the wake_q_head to add @task to
994   * @task: the task to queue for 'later' wakeup
995   *
996   * Queue a task for later wakeup, most likely by the wake_up_q() call in the
997   * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
998   * instantly.
999   *
1000   * This function must be used as-if it were wake_up_process(); IOW the task
1001   * must be ready to be woken at this location.
1002   *
1003   * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1004   * that already hold reference to @task can call the 'safe' version and trust
1005   * wake_q to do the right thing depending whether or not the @task is already
1006   * queued for wakeup.
1007   */
wake_q_add_safe(struct wake_q_head * head,struct task_struct * task)1008  void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1009  {
1010  	if (!__wake_q_add(head, task))
1011  		put_task_struct(task);
1012  }
1013  
wake_up_q(struct wake_q_head * head)1014  void wake_up_q(struct wake_q_head *head)
1015  {
1016  	struct wake_q_node *node = head->first;
1017  
1018  	while (node != WAKE_Q_TAIL) {
1019  		struct task_struct *task;
1020  
1021  		task = container_of(node, struct task_struct, wake_q);
1022  		node = node->next;
1023  		/* pairs with cmpxchg_relaxed() in __wake_q_add() */
1024  		WRITE_ONCE(task->wake_q.next, NULL);
1025  		/* Task can safely be re-inserted now. */
1026  
1027  		/*
1028  		 * wake_up_process() executes a full barrier, which pairs with
1029  		 * the queueing in wake_q_add() so as not to miss wakeups.
1030  		 */
1031  		wake_up_process(task);
1032  		put_task_struct(task);
1033  	}
1034  }
1035  
1036  /*
1037   * resched_curr - mark rq's current task 'to be rescheduled now'.
1038   *
1039   * On UP this means the setting of the need_resched flag, on SMP it
1040   * might also involve a cross-CPU call to trigger the scheduler on
1041   * the target CPU.
1042   */
resched_curr(struct rq * rq)1043  void resched_curr(struct rq *rq)
1044  {
1045  	struct task_struct *curr = rq->curr;
1046  	int cpu;
1047  
1048  	lockdep_assert_rq_held(rq);
1049  
1050  	if (test_tsk_need_resched(curr))
1051  		return;
1052  
1053  	cpu = cpu_of(rq);
1054  
1055  	if (cpu == smp_processor_id()) {
1056  		set_tsk_need_resched(curr);
1057  		set_preempt_need_resched();
1058  		return;
1059  	}
1060  
1061  	if (set_nr_and_not_polling(curr))
1062  		smp_send_reschedule(cpu);
1063  	else
1064  		trace_sched_wake_idle_without_ipi(cpu);
1065  }
1066  
resched_cpu(int cpu)1067  void resched_cpu(int cpu)
1068  {
1069  	struct rq *rq = cpu_rq(cpu);
1070  	unsigned long flags;
1071  
1072  	raw_spin_rq_lock_irqsave(rq, flags);
1073  	if (cpu_online(cpu) || cpu == smp_processor_id())
1074  		resched_curr(rq);
1075  	raw_spin_rq_unlock_irqrestore(rq, flags);
1076  }
1077  
1078  #ifdef CONFIG_SMP
1079  #ifdef CONFIG_NO_HZ_COMMON
1080  /*
1081   * In the semi idle case, use the nearest busy CPU for migrating timers
1082   * from an idle CPU.  This is good for power-savings.
1083   *
1084   * We don't do similar optimization for completely idle system, as
1085   * selecting an idle CPU will add more delays to the timers than intended
1086   * (as that CPU's timer base may not be uptodate wrt jiffies etc).
1087   */
get_nohz_timer_target(void)1088  int get_nohz_timer_target(void)
1089  {
1090  	int i, cpu = smp_processor_id(), default_cpu = -1;
1091  	struct sched_domain *sd;
1092  	const struct cpumask *hk_mask;
1093  
1094  	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1095  		if (!idle_cpu(cpu))
1096  			return cpu;
1097  		default_cpu = cpu;
1098  	}
1099  
1100  	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1101  
1102  	guard(rcu)();
1103  
1104  	for_each_domain(cpu, sd) {
1105  		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1106  			if (cpu == i)
1107  				continue;
1108  
1109  			if (!idle_cpu(i))
1110  				return i;
1111  		}
1112  	}
1113  
1114  	if (default_cpu == -1)
1115  		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1116  
1117  	return default_cpu;
1118  }
1119  
1120  /*
1121   * When add_timer_on() enqueues a timer into the timer wheel of an
1122   * idle CPU then this timer might expire before the next timer event
1123   * which is scheduled to wake up that CPU. In case of a completely
1124   * idle system the next event might even be infinite time into the
1125   * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1126   * leaves the inner idle loop so the newly added timer is taken into
1127   * account when the CPU goes back to idle and evaluates the timer
1128   * wheel for the next timer event.
1129   */
wake_up_idle_cpu(int cpu)1130  static void wake_up_idle_cpu(int cpu)
1131  {
1132  	struct rq *rq = cpu_rq(cpu);
1133  
1134  	if (cpu == smp_processor_id())
1135  		return;
1136  
1137  	if (set_nr_and_not_polling(rq->idle))
1138  		smp_send_reschedule(cpu);
1139  	else
1140  		trace_sched_wake_idle_without_ipi(cpu);
1141  }
1142  
wake_up_full_nohz_cpu(int cpu)1143  static bool wake_up_full_nohz_cpu(int cpu)
1144  {
1145  	/*
1146  	 * We just need the target to call irq_exit() and re-evaluate
1147  	 * the next tick. The nohz full kick at least implies that.
1148  	 * If needed we can still optimize that later with an
1149  	 * empty IRQ.
1150  	 */
1151  	if (cpu_is_offline(cpu))
1152  		return true;  /* Don't try to wake offline CPUs. */
1153  	if (tick_nohz_full_cpu(cpu)) {
1154  		if (cpu != smp_processor_id() ||
1155  		    tick_nohz_tick_stopped())
1156  			tick_nohz_full_kick_cpu(cpu);
1157  		return true;
1158  	}
1159  
1160  	return false;
1161  }
1162  
1163  /*
1164   * Wake up the specified CPU.  If the CPU is going offline, it is the
1165   * caller's responsibility to deal with the lost wakeup, for example,
1166   * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1167   */
wake_up_nohz_cpu(int cpu)1168  void wake_up_nohz_cpu(int cpu)
1169  {
1170  	if (!wake_up_full_nohz_cpu(cpu))
1171  		wake_up_idle_cpu(cpu);
1172  }
1173  
nohz_csd_func(void * info)1174  static void nohz_csd_func(void *info)
1175  {
1176  	struct rq *rq = info;
1177  	int cpu = cpu_of(rq);
1178  	unsigned int flags;
1179  
1180  	/*
1181  	 * Release the rq::nohz_csd.
1182  	 */
1183  	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1184  	WARN_ON(!(flags & NOHZ_KICK_MASK));
1185  
1186  	rq->idle_balance = idle_cpu(cpu);
1187  	if (rq->idle_balance) {
1188  		rq->nohz_idle_balance = flags;
1189  		__raise_softirq_irqoff(SCHED_SOFTIRQ);
1190  	}
1191  }
1192  
1193  #endif /* CONFIG_NO_HZ_COMMON */
1194  
1195  #ifdef CONFIG_NO_HZ_FULL
__need_bw_check(struct rq * rq,struct task_struct * p)1196  static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1197  {
1198  	if (rq->nr_running != 1)
1199  		return false;
1200  
1201  	if (p->sched_class != &fair_sched_class)
1202  		return false;
1203  
1204  	if (!task_on_rq_queued(p))
1205  		return false;
1206  
1207  	return true;
1208  }
1209  
sched_can_stop_tick(struct rq * rq)1210  bool sched_can_stop_tick(struct rq *rq)
1211  {
1212  	int fifo_nr_running;
1213  
1214  	/* Deadline tasks, even if single, need the tick */
1215  	if (rq->dl.dl_nr_running)
1216  		return false;
1217  
1218  	/*
1219  	 * If there are more than one RR tasks, we need the tick to affect the
1220  	 * actual RR behaviour.
1221  	 */
1222  	if (rq->rt.rr_nr_running) {
1223  		if (rq->rt.rr_nr_running == 1)
1224  			return true;
1225  		else
1226  			return false;
1227  	}
1228  
1229  	/*
1230  	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1231  	 * forced preemption between FIFO tasks.
1232  	 */
1233  	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1234  	if (fifo_nr_running)
1235  		return true;
1236  
1237  	/*
1238  	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
1239  	 * if there's more than one we need the tick for involuntary
1240  	 * preemption.
1241  	 */
1242  	if (rq->nr_running > 1)
1243  		return false;
1244  
1245  	/*
1246  	 * If there is one task and it has CFS runtime bandwidth constraints
1247  	 * and it's on the cpu now we don't want to stop the tick.
1248  	 * This check prevents clearing the bit if a newly enqueued task here is
1249  	 * dequeued by migrating while the constrained task continues to run.
1250  	 * E.g. going from 2->1 without going through pick_next_task().
1251  	 */
1252  	if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
1253  		if (cfs_task_bw_constrained(rq->curr))
1254  			return false;
1255  	}
1256  
1257  	return true;
1258  }
1259  #endif /* CONFIG_NO_HZ_FULL */
1260  #endif /* CONFIG_SMP */
1261  
1262  #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1263  			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1264  /*
1265   * Iterate task_group tree rooted at *from, calling @down when first entering a
1266   * node and @up when leaving it for the final time.
1267   *
1268   * Caller must hold rcu_lock or sufficient equivalent.
1269   */
walk_tg_tree_from(struct task_group * from,tg_visitor down,tg_visitor up,void * data)1270  int walk_tg_tree_from(struct task_group *from,
1271  			     tg_visitor down, tg_visitor up, void *data)
1272  {
1273  	struct task_group *parent, *child;
1274  	int ret;
1275  
1276  	parent = from;
1277  
1278  down:
1279  	ret = (*down)(parent, data);
1280  	if (ret)
1281  		goto out;
1282  	list_for_each_entry_rcu(child, &parent->children, siblings) {
1283  		parent = child;
1284  		goto down;
1285  
1286  up:
1287  		continue;
1288  	}
1289  	ret = (*up)(parent, data);
1290  	if (ret || parent == from)
1291  		goto out;
1292  
1293  	child = parent;
1294  	parent = parent->parent;
1295  	if (parent)
1296  		goto up;
1297  out:
1298  	return ret;
1299  }
1300  
tg_nop(struct task_group * tg,void * data)1301  int tg_nop(struct task_group *tg, void *data)
1302  {
1303  	return 0;
1304  }
1305  #endif
1306  
set_load_weight(struct task_struct * p,bool update_load)1307  static void set_load_weight(struct task_struct *p, bool update_load)
1308  {
1309  	int prio = p->static_prio - MAX_RT_PRIO;
1310  	struct load_weight lw;
1311  
1312  	if (task_has_idle_policy(p)) {
1313  		lw.weight = scale_load(WEIGHT_IDLEPRIO);
1314  		lw.inv_weight = WMULT_IDLEPRIO;
1315  	} else {
1316  		lw.weight = scale_load(sched_prio_to_weight[prio]);
1317  		lw.inv_weight = sched_prio_to_wmult[prio];
1318  	}
1319  
1320  	/*
1321  	 * SCHED_OTHER tasks have to update their load when changing their
1322  	 * weight
1323  	 */
1324  	if (update_load && p->sched_class == &fair_sched_class)
1325  		reweight_task(p, &lw);
1326  	else
1327  		p->se.load = lw;
1328  }
1329  
1330  #ifdef CONFIG_UCLAMP_TASK
1331  /*
1332   * Serializes updates of utilization clamp values
1333   *
1334   * The (slow-path) user-space triggers utilization clamp value updates which
1335   * can require updates on (fast-path) scheduler's data structures used to
1336   * support enqueue/dequeue operations.
1337   * While the per-CPU rq lock protects fast-path update operations, user-space
1338   * requests are serialized using a mutex to reduce the risk of conflicting
1339   * updates or API abuses.
1340   */
1341  static DEFINE_MUTEX(uclamp_mutex);
1342  
1343  /* Max allowed minimum utilization */
1344  static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1345  
1346  /* Max allowed maximum utilization */
1347  static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1348  
1349  /*
1350   * By default RT tasks run at the maximum performance point/capacity of the
1351   * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1352   * SCHED_CAPACITY_SCALE.
1353   *
1354   * This knob allows admins to change the default behavior when uclamp is being
1355   * used. In battery powered devices, particularly, running at the maximum
1356   * capacity and frequency will increase energy consumption and shorten the
1357   * battery life.
1358   *
1359   * This knob only affects RT tasks that their uclamp_se->user_defined == false.
1360   *
1361   * This knob will not override the system default sched_util_clamp_min defined
1362   * above.
1363   */
1364  static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1365  
1366  /* All clamps are required to be less or equal than these values */
1367  static struct uclamp_se uclamp_default[UCLAMP_CNT];
1368  
1369  /*
1370   * This static key is used to reduce the uclamp overhead in the fast path. It
1371   * primarily disables the call to uclamp_rq_{inc, dec}() in
1372   * enqueue/dequeue_task().
1373   *
1374   * This allows users to continue to enable uclamp in their kernel config with
1375   * minimum uclamp overhead in the fast path.
1376   *
1377   * As soon as userspace modifies any of the uclamp knobs, the static key is
1378   * enabled, since we have an actual users that make use of uclamp
1379   * functionality.
1380   *
1381   * The knobs that would enable this static key are:
1382   *
1383   *   * A task modifying its uclamp value with sched_setattr().
1384   *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1385   *   * An admin modifying the cgroup cpu.uclamp.{min, max}
1386   */
1387  DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1388  
1389  /* Integer rounded range for each bucket */
1390  #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
1391  
1392  #define for_each_clamp_id(clamp_id) \
1393  	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
1394  
uclamp_bucket_id(unsigned int clamp_value)1395  static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
1396  {
1397  	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
1398  }
1399  
uclamp_none(enum uclamp_id clamp_id)1400  static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
1401  {
1402  	if (clamp_id == UCLAMP_MIN)
1403  		return 0;
1404  	return SCHED_CAPACITY_SCALE;
1405  }
1406  
uclamp_se_set(struct uclamp_se * uc_se,unsigned int value,bool user_defined)1407  static inline void uclamp_se_set(struct uclamp_se *uc_se,
1408  				 unsigned int value, bool user_defined)
1409  {
1410  	uc_se->value = value;
1411  	uc_se->bucket_id = uclamp_bucket_id(value);
1412  	uc_se->user_defined = user_defined;
1413  }
1414  
1415  static inline unsigned int
uclamp_idle_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1416  uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1417  		  unsigned int clamp_value)
1418  {
1419  	/*
1420  	 * Avoid blocked utilization pushing up the frequency when we go
1421  	 * idle (which drops the max-clamp) by retaining the last known
1422  	 * max-clamp.
1423  	 */
1424  	if (clamp_id == UCLAMP_MAX) {
1425  		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1426  		return clamp_value;
1427  	}
1428  
1429  	return uclamp_none(UCLAMP_MIN);
1430  }
1431  
uclamp_idle_reset(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1432  static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1433  				     unsigned int clamp_value)
1434  {
1435  	/* Reset max-clamp retention only on idle exit */
1436  	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1437  		return;
1438  
1439  	uclamp_rq_set(rq, clamp_id, clamp_value);
1440  }
1441  
1442  static inline
uclamp_rq_max_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1443  unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1444  				   unsigned int clamp_value)
1445  {
1446  	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1447  	int bucket_id = UCLAMP_BUCKETS - 1;
1448  
1449  	/*
1450  	 * Since both min and max clamps are max aggregated, find the
1451  	 * top most bucket with tasks in.
1452  	 */
1453  	for ( ; bucket_id >= 0; bucket_id--) {
1454  		if (!bucket[bucket_id].tasks)
1455  			continue;
1456  		return bucket[bucket_id].value;
1457  	}
1458  
1459  	/* No tasks -- default clamp values */
1460  	return uclamp_idle_value(rq, clamp_id, clamp_value);
1461  }
1462  
__uclamp_update_util_min_rt_default(struct task_struct * p)1463  static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1464  {
1465  	unsigned int default_util_min;
1466  	struct uclamp_se *uc_se;
1467  
1468  	lockdep_assert_held(&p->pi_lock);
1469  
1470  	uc_se = &p->uclamp_req[UCLAMP_MIN];
1471  
1472  	/* Only sync if user didn't override the default */
1473  	if (uc_se->user_defined)
1474  		return;
1475  
1476  	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1477  	uclamp_se_set(uc_se, default_util_min, false);
1478  }
1479  
uclamp_update_util_min_rt_default(struct task_struct * p)1480  static void uclamp_update_util_min_rt_default(struct task_struct *p)
1481  {
1482  	struct rq_flags rf;
1483  	struct rq *rq;
1484  
1485  	if (!rt_task(p))
1486  		return;
1487  
1488  	/* Protect updates to p->uclamp_* */
1489  	rq = task_rq_lock(p, &rf);
1490  	__uclamp_update_util_min_rt_default(p);
1491  	task_rq_unlock(rq, p, &rf);
1492  }
1493  
1494  static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct * p,enum uclamp_id clamp_id)1495  uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1496  {
1497  	/* Copy by value as we could modify it */
1498  	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1499  #ifdef CONFIG_UCLAMP_TASK_GROUP
1500  	unsigned int tg_min, tg_max, value;
1501  
1502  	/*
1503  	 * Tasks in autogroups or root task group will be
1504  	 * restricted by system defaults.
1505  	 */
1506  	if (task_group_is_autogroup(task_group(p)))
1507  		return uc_req;
1508  	if (task_group(p) == &root_task_group)
1509  		return uc_req;
1510  
1511  	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1512  	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1513  	value = uc_req.value;
1514  	value = clamp(value, tg_min, tg_max);
1515  	uclamp_se_set(&uc_req, value, false);
1516  #endif
1517  
1518  	return uc_req;
1519  }
1520  
1521  /*
1522   * The effective clamp bucket index of a task depends on, by increasing
1523   * priority:
1524   * - the task specific clamp value, when explicitly requested from userspace
1525   * - the task group effective clamp value, for tasks not either in the root
1526   *   group or in an autogroup
1527   * - the system default clamp value, defined by the sysadmin
1528   */
1529  static inline struct uclamp_se
uclamp_eff_get(struct task_struct * p,enum uclamp_id clamp_id)1530  uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1531  {
1532  	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1533  	struct uclamp_se uc_max = uclamp_default[clamp_id];
1534  
1535  	/* System default restrictions always apply */
1536  	if (unlikely(uc_req.value > uc_max.value))
1537  		return uc_max;
1538  
1539  	return uc_req;
1540  }
1541  
uclamp_eff_value(struct task_struct * p,enum uclamp_id clamp_id)1542  unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1543  {
1544  	struct uclamp_se uc_eff;
1545  
1546  	/* Task currently refcounted: use back-annotated (effective) value */
1547  	if (p->uclamp[clamp_id].active)
1548  		return (unsigned long)p->uclamp[clamp_id].value;
1549  
1550  	uc_eff = uclamp_eff_get(p, clamp_id);
1551  
1552  	return (unsigned long)uc_eff.value;
1553  }
1554  
1555  /*
1556   * When a task is enqueued on a rq, the clamp bucket currently defined by the
1557   * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1558   * updates the rq's clamp value if required.
1559   *
1560   * Tasks can have a task-specific value requested from user-space, track
1561   * within each bucket the maximum value for tasks refcounted in it.
1562   * This "local max aggregation" allows to track the exact "requested" value
1563   * for each bucket when all its RUNNABLE tasks require the same clamp.
1564   */
uclamp_rq_inc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1565  static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1566  				    enum uclamp_id clamp_id)
1567  {
1568  	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1569  	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1570  	struct uclamp_bucket *bucket;
1571  
1572  	lockdep_assert_rq_held(rq);
1573  
1574  	/* Update task effective clamp */
1575  	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1576  
1577  	bucket = &uc_rq->bucket[uc_se->bucket_id];
1578  	bucket->tasks++;
1579  	uc_se->active = true;
1580  
1581  	uclamp_idle_reset(rq, clamp_id, uc_se->value);
1582  
1583  	/*
1584  	 * Local max aggregation: rq buckets always track the max
1585  	 * "requested" clamp value of its RUNNABLE tasks.
1586  	 */
1587  	if (bucket->tasks == 1 || uc_se->value > bucket->value)
1588  		bucket->value = uc_se->value;
1589  
1590  	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1591  		uclamp_rq_set(rq, clamp_id, uc_se->value);
1592  }
1593  
1594  /*
1595   * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1596   * is released. If this is the last task reference counting the rq's max
1597   * active clamp value, then the rq's clamp value is updated.
1598   *
1599   * Both refcounted tasks and rq's cached clamp values are expected to be
1600   * always valid. If it's detected they are not, as defensive programming,
1601   * enforce the expected state and warn.
1602   */
uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1603  static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1604  				    enum uclamp_id clamp_id)
1605  {
1606  	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1607  	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1608  	struct uclamp_bucket *bucket;
1609  	unsigned int bkt_clamp;
1610  	unsigned int rq_clamp;
1611  
1612  	lockdep_assert_rq_held(rq);
1613  
1614  	/*
1615  	 * If sched_uclamp_used was enabled after task @p was enqueued,
1616  	 * we could end up with unbalanced call to uclamp_rq_dec_id().
1617  	 *
1618  	 * In this case the uc_se->active flag should be false since no uclamp
1619  	 * accounting was performed at enqueue time and we can just return
1620  	 * here.
1621  	 *
1622  	 * Need to be careful of the following enqueue/dequeue ordering
1623  	 * problem too
1624  	 *
1625  	 *	enqueue(taskA)
1626  	 *	// sched_uclamp_used gets enabled
1627  	 *	enqueue(taskB)
1628  	 *	dequeue(taskA)
1629  	 *	// Must not decrement bucket->tasks here
1630  	 *	dequeue(taskB)
1631  	 *
1632  	 * where we could end up with stale data in uc_se and
1633  	 * bucket[uc_se->bucket_id].
1634  	 *
1635  	 * The following check here eliminates the possibility of such race.
1636  	 */
1637  	if (unlikely(!uc_se->active))
1638  		return;
1639  
1640  	bucket = &uc_rq->bucket[uc_se->bucket_id];
1641  
1642  	SCHED_WARN_ON(!bucket->tasks);
1643  	if (likely(bucket->tasks))
1644  		bucket->tasks--;
1645  
1646  	uc_se->active = false;
1647  
1648  	/*
1649  	 * Keep "local max aggregation" simple and accept to (possibly)
1650  	 * overboost some RUNNABLE tasks in the same bucket.
1651  	 * The rq clamp bucket value is reset to its base value whenever
1652  	 * there are no more RUNNABLE tasks refcounting it.
1653  	 */
1654  	if (likely(bucket->tasks))
1655  		return;
1656  
1657  	rq_clamp = uclamp_rq_get(rq, clamp_id);
1658  	/*
1659  	 * Defensive programming: this should never happen. If it happens,
1660  	 * e.g. due to future modification, warn and fixup the expected value.
1661  	 */
1662  	SCHED_WARN_ON(bucket->value > rq_clamp);
1663  	if (bucket->value >= rq_clamp) {
1664  		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1665  		uclamp_rq_set(rq, clamp_id, bkt_clamp);
1666  	}
1667  }
1668  
uclamp_rq_inc(struct rq * rq,struct task_struct * p)1669  static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1670  {
1671  	enum uclamp_id clamp_id;
1672  
1673  	/*
1674  	 * Avoid any overhead until uclamp is actually used by the userspace.
1675  	 *
1676  	 * The condition is constructed such that a NOP is generated when
1677  	 * sched_uclamp_used is disabled.
1678  	 */
1679  	if (!static_branch_unlikely(&sched_uclamp_used))
1680  		return;
1681  
1682  	if (unlikely(!p->sched_class->uclamp_enabled))
1683  		return;
1684  
1685  	for_each_clamp_id(clamp_id)
1686  		uclamp_rq_inc_id(rq, p, clamp_id);
1687  
1688  	/* Reset clamp idle holding when there is one RUNNABLE task */
1689  	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1690  		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1691  }
1692  
uclamp_rq_dec(struct rq * rq,struct task_struct * p)1693  static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1694  {
1695  	enum uclamp_id clamp_id;
1696  
1697  	/*
1698  	 * Avoid any overhead until uclamp is actually used by the userspace.
1699  	 *
1700  	 * The condition is constructed such that a NOP is generated when
1701  	 * sched_uclamp_used is disabled.
1702  	 */
1703  	if (!static_branch_unlikely(&sched_uclamp_used))
1704  		return;
1705  
1706  	if (unlikely(!p->sched_class->uclamp_enabled))
1707  		return;
1708  
1709  	for_each_clamp_id(clamp_id)
1710  		uclamp_rq_dec_id(rq, p, clamp_id);
1711  }
1712  
uclamp_rq_reinc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1713  static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1714  				      enum uclamp_id clamp_id)
1715  {
1716  	if (!p->uclamp[clamp_id].active)
1717  		return;
1718  
1719  	uclamp_rq_dec_id(rq, p, clamp_id);
1720  	uclamp_rq_inc_id(rq, p, clamp_id);
1721  
1722  	/*
1723  	 * Make sure to clear the idle flag if we've transiently reached 0
1724  	 * active tasks on rq.
1725  	 */
1726  	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1727  		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1728  }
1729  
1730  static inline void
uclamp_update_active(struct task_struct * p)1731  uclamp_update_active(struct task_struct *p)
1732  {
1733  	enum uclamp_id clamp_id;
1734  	struct rq_flags rf;
1735  	struct rq *rq;
1736  
1737  	/*
1738  	 * Lock the task and the rq where the task is (or was) queued.
1739  	 *
1740  	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1741  	 * price to pay to safely serialize util_{min,max} updates with
1742  	 * enqueues, dequeues and migration operations.
1743  	 * This is the same locking schema used by __set_cpus_allowed_ptr().
1744  	 */
1745  	rq = task_rq_lock(p, &rf);
1746  
1747  	/*
1748  	 * Setting the clamp bucket is serialized by task_rq_lock().
1749  	 * If the task is not yet RUNNABLE and its task_struct is not
1750  	 * affecting a valid clamp bucket, the next time it's enqueued,
1751  	 * it will already see the updated clamp bucket value.
1752  	 */
1753  	for_each_clamp_id(clamp_id)
1754  		uclamp_rq_reinc_id(rq, p, clamp_id);
1755  
1756  	task_rq_unlock(rq, p, &rf);
1757  }
1758  
1759  #ifdef CONFIG_UCLAMP_TASK_GROUP
1760  static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state * css)1761  uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1762  {
1763  	struct css_task_iter it;
1764  	struct task_struct *p;
1765  
1766  	css_task_iter_start(css, 0, &it);
1767  	while ((p = css_task_iter_next(&it)))
1768  		uclamp_update_active(p);
1769  	css_task_iter_end(&it);
1770  }
1771  
1772  static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1773  #endif
1774  
1775  #ifdef CONFIG_SYSCTL
1776  #ifdef CONFIG_UCLAMP_TASK
1777  #ifdef CONFIG_UCLAMP_TASK_GROUP
uclamp_update_root_tg(void)1778  static void uclamp_update_root_tg(void)
1779  {
1780  	struct task_group *tg = &root_task_group;
1781  
1782  	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1783  		      sysctl_sched_uclamp_util_min, false);
1784  	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1785  		      sysctl_sched_uclamp_util_max, false);
1786  
1787  	rcu_read_lock();
1788  	cpu_util_update_eff(&root_task_group.css);
1789  	rcu_read_unlock();
1790  }
1791  #else
uclamp_update_root_tg(void)1792  static void uclamp_update_root_tg(void) { }
1793  #endif
1794  
uclamp_sync_util_min_rt_default(void)1795  static void uclamp_sync_util_min_rt_default(void)
1796  {
1797  	struct task_struct *g, *p;
1798  
1799  	/*
1800  	 * copy_process()			sysctl_uclamp
1801  	 *					  uclamp_min_rt = X;
1802  	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1803  	 *   // link thread			  smp_mb__after_spinlock()
1804  	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1805  	 *   sched_post_fork()			  for_each_process_thread()
1806  	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1807  	 *
1808  	 * Ensures that either sched_post_fork() will observe the new
1809  	 * uclamp_min_rt or for_each_process_thread() will observe the new
1810  	 * task.
1811  	 */
1812  	read_lock(&tasklist_lock);
1813  	smp_mb__after_spinlock();
1814  	read_unlock(&tasklist_lock);
1815  
1816  	rcu_read_lock();
1817  	for_each_process_thread(g, p)
1818  		uclamp_update_util_min_rt_default(p);
1819  	rcu_read_unlock();
1820  }
1821  
sysctl_sched_uclamp_handler(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)1822  static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
1823  				void *buffer, size_t *lenp, loff_t *ppos)
1824  {
1825  	bool update_root_tg = false;
1826  	int old_min, old_max, old_min_rt;
1827  	int result;
1828  
1829  	guard(mutex)(&uclamp_mutex);
1830  
1831  	old_min = sysctl_sched_uclamp_util_min;
1832  	old_max = sysctl_sched_uclamp_util_max;
1833  	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1834  
1835  	result = proc_dointvec(table, write, buffer, lenp, ppos);
1836  	if (result)
1837  		goto undo;
1838  	if (!write)
1839  		return 0;
1840  
1841  	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1842  	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
1843  	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1844  
1845  		result = -EINVAL;
1846  		goto undo;
1847  	}
1848  
1849  	if (old_min != sysctl_sched_uclamp_util_min) {
1850  		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1851  			      sysctl_sched_uclamp_util_min, false);
1852  		update_root_tg = true;
1853  	}
1854  	if (old_max != sysctl_sched_uclamp_util_max) {
1855  		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1856  			      sysctl_sched_uclamp_util_max, false);
1857  		update_root_tg = true;
1858  	}
1859  
1860  	if (update_root_tg) {
1861  		static_branch_enable(&sched_uclamp_used);
1862  		uclamp_update_root_tg();
1863  	}
1864  
1865  	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1866  		static_branch_enable(&sched_uclamp_used);
1867  		uclamp_sync_util_min_rt_default();
1868  	}
1869  
1870  	/*
1871  	 * We update all RUNNABLE tasks only when task groups are in use.
1872  	 * Otherwise, keep it simple and do just a lazy update at each next
1873  	 * task enqueue time.
1874  	 */
1875  	return 0;
1876  
1877  undo:
1878  	sysctl_sched_uclamp_util_min = old_min;
1879  	sysctl_sched_uclamp_util_max = old_max;
1880  	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1881  	return result;
1882  }
1883  #endif
1884  #endif
1885  
uclamp_validate(struct task_struct * p,const struct sched_attr * attr)1886  static int uclamp_validate(struct task_struct *p,
1887  			   const struct sched_attr *attr)
1888  {
1889  	int util_min = p->uclamp_req[UCLAMP_MIN].value;
1890  	int util_max = p->uclamp_req[UCLAMP_MAX].value;
1891  
1892  	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1893  		util_min = attr->sched_util_min;
1894  
1895  		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
1896  			return -EINVAL;
1897  	}
1898  
1899  	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1900  		util_max = attr->sched_util_max;
1901  
1902  		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
1903  			return -EINVAL;
1904  	}
1905  
1906  	if (util_min != -1 && util_max != -1 && util_min > util_max)
1907  		return -EINVAL;
1908  
1909  	/*
1910  	 * We have valid uclamp attributes; make sure uclamp is enabled.
1911  	 *
1912  	 * We need to do that here, because enabling static branches is a
1913  	 * blocking operation which obviously cannot be done while holding
1914  	 * scheduler locks.
1915  	 */
1916  	static_branch_enable(&sched_uclamp_used);
1917  
1918  	return 0;
1919  }
1920  
uclamp_reset(const struct sched_attr * attr,enum uclamp_id clamp_id,struct uclamp_se * uc_se)1921  static bool uclamp_reset(const struct sched_attr *attr,
1922  			 enum uclamp_id clamp_id,
1923  			 struct uclamp_se *uc_se)
1924  {
1925  	/* Reset on sched class change for a non user-defined clamp value. */
1926  	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
1927  	    !uc_se->user_defined)
1928  		return true;
1929  
1930  	/* Reset on sched_util_{min,max} == -1. */
1931  	if (clamp_id == UCLAMP_MIN &&
1932  	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1933  	    attr->sched_util_min == -1) {
1934  		return true;
1935  	}
1936  
1937  	if (clamp_id == UCLAMP_MAX &&
1938  	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1939  	    attr->sched_util_max == -1) {
1940  		return true;
1941  	}
1942  
1943  	return false;
1944  }
1945  
__setscheduler_uclamp(struct task_struct * p,const struct sched_attr * attr)1946  static void __setscheduler_uclamp(struct task_struct *p,
1947  				  const struct sched_attr *attr)
1948  {
1949  	enum uclamp_id clamp_id;
1950  
1951  	for_each_clamp_id(clamp_id) {
1952  		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1953  		unsigned int value;
1954  
1955  		if (!uclamp_reset(attr, clamp_id, uc_se))
1956  			continue;
1957  
1958  		/*
1959  		 * RT by default have a 100% boost value that could be modified
1960  		 * at runtime.
1961  		 */
1962  		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1963  			value = sysctl_sched_uclamp_util_min_rt_default;
1964  		else
1965  			value = uclamp_none(clamp_id);
1966  
1967  		uclamp_se_set(uc_se, value, false);
1968  
1969  	}
1970  
1971  	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1972  		return;
1973  
1974  	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1975  	    attr->sched_util_min != -1) {
1976  		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1977  			      attr->sched_util_min, true);
1978  	}
1979  
1980  	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1981  	    attr->sched_util_max != -1) {
1982  		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1983  			      attr->sched_util_max, true);
1984  	}
1985  }
1986  
uclamp_fork(struct task_struct * p)1987  static void uclamp_fork(struct task_struct *p)
1988  {
1989  	enum uclamp_id clamp_id;
1990  
1991  	/*
1992  	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1993  	 * as the task is still at its early fork stages.
1994  	 */
1995  	for_each_clamp_id(clamp_id)
1996  		p->uclamp[clamp_id].active = false;
1997  
1998  	if (likely(!p->sched_reset_on_fork))
1999  		return;
2000  
2001  	for_each_clamp_id(clamp_id) {
2002  		uclamp_se_set(&p->uclamp_req[clamp_id],
2003  			      uclamp_none(clamp_id), false);
2004  	}
2005  }
2006  
uclamp_post_fork(struct task_struct * p)2007  static void uclamp_post_fork(struct task_struct *p)
2008  {
2009  	uclamp_update_util_min_rt_default(p);
2010  }
2011  
init_uclamp_rq(struct rq * rq)2012  static void __init init_uclamp_rq(struct rq *rq)
2013  {
2014  	enum uclamp_id clamp_id;
2015  	struct uclamp_rq *uc_rq = rq->uclamp;
2016  
2017  	for_each_clamp_id(clamp_id) {
2018  		uc_rq[clamp_id] = (struct uclamp_rq) {
2019  			.value = uclamp_none(clamp_id)
2020  		};
2021  	}
2022  
2023  	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2024  }
2025  
init_uclamp(void)2026  static void __init init_uclamp(void)
2027  {
2028  	struct uclamp_se uc_max = {};
2029  	enum uclamp_id clamp_id;
2030  	int cpu;
2031  
2032  	for_each_possible_cpu(cpu)
2033  		init_uclamp_rq(cpu_rq(cpu));
2034  
2035  	for_each_clamp_id(clamp_id) {
2036  		uclamp_se_set(&init_task.uclamp_req[clamp_id],
2037  			      uclamp_none(clamp_id), false);
2038  	}
2039  
2040  	/* System defaults allow max clamp values for both indexes */
2041  	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2042  	for_each_clamp_id(clamp_id) {
2043  		uclamp_default[clamp_id] = uc_max;
2044  #ifdef CONFIG_UCLAMP_TASK_GROUP
2045  		root_task_group.uclamp_req[clamp_id] = uc_max;
2046  		root_task_group.uclamp[clamp_id] = uc_max;
2047  #endif
2048  	}
2049  }
2050  
2051  #else /* CONFIG_UCLAMP_TASK */
uclamp_rq_inc(struct rq * rq,struct task_struct * p)2052  static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
uclamp_rq_dec(struct rq * rq,struct task_struct * p)2053  static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
uclamp_validate(struct task_struct * p,const struct sched_attr * attr)2054  static inline int uclamp_validate(struct task_struct *p,
2055  				  const struct sched_attr *attr)
2056  {
2057  	return -EOPNOTSUPP;
2058  }
__setscheduler_uclamp(struct task_struct * p,const struct sched_attr * attr)2059  static void __setscheduler_uclamp(struct task_struct *p,
2060  				  const struct sched_attr *attr) { }
uclamp_fork(struct task_struct * p)2061  static inline void uclamp_fork(struct task_struct *p) { }
uclamp_post_fork(struct task_struct * p)2062  static inline void uclamp_post_fork(struct task_struct *p) { }
init_uclamp(void)2063  static inline void init_uclamp(void) { }
2064  #endif /* CONFIG_UCLAMP_TASK */
2065  
sched_task_on_rq(struct task_struct * p)2066  bool sched_task_on_rq(struct task_struct *p)
2067  {
2068  	return task_on_rq_queued(p);
2069  }
2070  
get_wchan(struct task_struct * p)2071  unsigned long get_wchan(struct task_struct *p)
2072  {
2073  	unsigned long ip = 0;
2074  	unsigned int state;
2075  
2076  	if (!p || p == current)
2077  		return 0;
2078  
2079  	/* Only get wchan if task is blocked and we can keep it that way. */
2080  	raw_spin_lock_irq(&p->pi_lock);
2081  	state = READ_ONCE(p->__state);
2082  	smp_rmb(); /* see try_to_wake_up() */
2083  	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2084  		ip = __get_wchan(p);
2085  	raw_spin_unlock_irq(&p->pi_lock);
2086  
2087  	return ip;
2088  }
2089  
enqueue_task(struct rq * rq,struct task_struct * p,int flags)2090  static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2091  {
2092  	if (!(flags & ENQUEUE_NOCLOCK))
2093  		update_rq_clock(rq);
2094  
2095  	if (!(flags & ENQUEUE_RESTORE)) {
2096  		sched_info_enqueue(rq, p);
2097  		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
2098  	}
2099  
2100  	uclamp_rq_inc(rq, p);
2101  	p->sched_class->enqueue_task(rq, p, flags);
2102  
2103  	if (sched_core_enabled(rq))
2104  		sched_core_enqueue(rq, p);
2105  }
2106  
dequeue_task(struct rq * rq,struct task_struct * p,int flags)2107  static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2108  {
2109  	if (sched_core_enabled(rq))
2110  		sched_core_dequeue(rq, p, flags);
2111  
2112  	if (!(flags & DEQUEUE_NOCLOCK))
2113  		update_rq_clock(rq);
2114  
2115  	if (!(flags & DEQUEUE_SAVE)) {
2116  		sched_info_dequeue(rq, p);
2117  		psi_dequeue(p, flags & DEQUEUE_SLEEP);
2118  	}
2119  
2120  	uclamp_rq_dec(rq, p);
2121  	p->sched_class->dequeue_task(rq, p, flags);
2122  }
2123  
activate_task(struct rq * rq,struct task_struct * p,int flags)2124  void activate_task(struct rq *rq, struct task_struct *p, int flags)
2125  {
2126  	if (task_on_rq_migrating(p))
2127  		flags |= ENQUEUE_MIGRATED;
2128  	if (flags & ENQUEUE_MIGRATED)
2129  		sched_mm_cid_migrate_to(rq, p);
2130  
2131  	enqueue_task(rq, p, flags);
2132  
2133  	p->on_rq = TASK_ON_RQ_QUEUED;
2134  }
2135  
deactivate_task(struct rq * rq,struct task_struct * p,int flags)2136  void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2137  {
2138  	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
2139  
2140  	dequeue_task(rq, p, flags);
2141  }
2142  
__normal_prio(int policy,int rt_prio,int nice)2143  static inline int __normal_prio(int policy, int rt_prio, int nice)
2144  {
2145  	int prio;
2146  
2147  	if (dl_policy(policy))
2148  		prio = MAX_DL_PRIO - 1;
2149  	else if (rt_policy(policy))
2150  		prio = MAX_RT_PRIO - 1 - rt_prio;
2151  	else
2152  		prio = NICE_TO_PRIO(nice);
2153  
2154  	return prio;
2155  }
2156  
2157  /*
2158   * Calculate the expected normal priority: i.e. priority
2159   * without taking RT-inheritance into account. Might be
2160   * boosted by interactivity modifiers. Changes upon fork,
2161   * setprio syscalls, and whenever the interactivity
2162   * estimator recalculates.
2163   */
normal_prio(struct task_struct * p)2164  static inline int normal_prio(struct task_struct *p)
2165  {
2166  	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
2167  }
2168  
2169  /*
2170   * Calculate the current priority, i.e. the priority
2171   * taken into account by the scheduler. This value might
2172   * be boosted by RT tasks, or might be boosted by
2173   * interactivity modifiers. Will be RT if the task got
2174   * RT-boosted. If not then it returns p->normal_prio.
2175   */
effective_prio(struct task_struct * p)2176  static int effective_prio(struct task_struct *p)
2177  {
2178  	p->normal_prio = normal_prio(p);
2179  	/*
2180  	 * If we are RT tasks or we were boosted to RT priority,
2181  	 * keep the priority unchanged. Otherwise, update priority
2182  	 * to the normal priority:
2183  	 */
2184  	if (!rt_prio(p->prio))
2185  		return p->normal_prio;
2186  	return p->prio;
2187  }
2188  
2189  /**
2190   * task_curr - is this task currently executing on a CPU?
2191   * @p: the task in question.
2192   *
2193   * Return: 1 if the task is currently executing. 0 otherwise.
2194   */
task_curr(const struct task_struct * p)2195  inline int task_curr(const struct task_struct *p)
2196  {
2197  	return cpu_curr(task_cpu(p)) == p;
2198  }
2199  
2200  /*
2201   * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2202   * use the balance_callback list if you want balancing.
2203   *
2204   * this means any call to check_class_changed() must be followed by a call to
2205   * balance_callback().
2206   */
check_class_changed(struct rq * rq,struct task_struct * p,const struct sched_class * prev_class,int oldprio)2207  static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2208  				       const struct sched_class *prev_class,
2209  				       int oldprio)
2210  {
2211  	if (prev_class != p->sched_class) {
2212  		if (prev_class->switched_from)
2213  			prev_class->switched_from(rq, p);
2214  
2215  		p->sched_class->switched_to(rq, p);
2216  	} else if (oldprio != p->prio || dl_task(p))
2217  		p->sched_class->prio_changed(rq, p, oldprio);
2218  }
2219  
wakeup_preempt(struct rq * rq,struct task_struct * p,int flags)2220  void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2221  {
2222  	if (p->sched_class == rq->curr->sched_class)
2223  		rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2224  	else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2225  		resched_curr(rq);
2226  
2227  	/*
2228  	 * A queue event has occurred, and we're going to schedule.  In
2229  	 * this case, we can save a useless back to back clock update.
2230  	 */
2231  	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2232  		rq_clock_skip_update(rq);
2233  }
2234  
2235  static __always_inline
__task_state_match(struct task_struct * p,unsigned int state)2236  int __task_state_match(struct task_struct *p, unsigned int state)
2237  {
2238  	if (READ_ONCE(p->__state) & state)
2239  		return 1;
2240  
2241  #ifdef CONFIG_PREEMPT_RT
2242  	if (READ_ONCE(p->saved_state) & state)
2243  		return -1;
2244  #endif
2245  	return 0;
2246  }
2247  
2248  static __always_inline
task_state_match(struct task_struct * p,unsigned int state)2249  int task_state_match(struct task_struct *p, unsigned int state)
2250  {
2251  #ifdef CONFIG_PREEMPT_RT
2252  	int match;
2253  
2254  	/*
2255  	 * Serialize against current_save_and_set_rtlock_wait_state() and
2256  	 * current_restore_rtlock_saved_state().
2257  	 */
2258  	raw_spin_lock_irq(&p->pi_lock);
2259  	match = __task_state_match(p, state);
2260  	raw_spin_unlock_irq(&p->pi_lock);
2261  
2262  	return match;
2263  #else
2264  	return __task_state_match(p, state);
2265  #endif
2266  }
2267  
2268  /*
2269   * wait_task_inactive - wait for a thread to unschedule.
2270   *
2271   * Wait for the thread to block in any of the states set in @match_state.
2272   * If it changes, i.e. @p might have woken up, then return zero.  When we
2273   * succeed in waiting for @p to be off its CPU, we return a positive number
2274   * (its total switch count).  If a second call a short while later returns the
2275   * same number, the caller can be sure that @p has remained unscheduled the
2276   * whole time.
2277   *
2278   * The caller must ensure that the task *will* unschedule sometime soon,
2279   * else this function might spin for a *long* time. This function can't
2280   * be called with interrupts off, or it may introduce deadlock with
2281   * smp_call_function() if an IPI is sent by the same process we are
2282   * waiting to become inactive.
2283   */
wait_task_inactive(struct task_struct * p,unsigned int match_state)2284  unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2285  {
2286  	int running, queued, match;
2287  	struct rq_flags rf;
2288  	unsigned long ncsw;
2289  	struct rq *rq;
2290  
2291  	for (;;) {
2292  		/*
2293  		 * We do the initial early heuristics without holding
2294  		 * any task-queue locks at all. We'll only try to get
2295  		 * the runqueue lock when things look like they will
2296  		 * work out!
2297  		 */
2298  		rq = task_rq(p);
2299  
2300  		/*
2301  		 * If the task is actively running on another CPU
2302  		 * still, just relax and busy-wait without holding
2303  		 * any locks.
2304  		 *
2305  		 * NOTE! Since we don't hold any locks, it's not
2306  		 * even sure that "rq" stays as the right runqueue!
2307  		 * But we don't care, since "task_on_cpu()" will
2308  		 * return false if the runqueue has changed and p
2309  		 * is actually now running somewhere else!
2310  		 */
2311  		while (task_on_cpu(rq, p)) {
2312  			if (!task_state_match(p, match_state))
2313  				return 0;
2314  			cpu_relax();
2315  		}
2316  
2317  		/*
2318  		 * Ok, time to look more closely! We need the rq
2319  		 * lock now, to be *sure*. If we're wrong, we'll
2320  		 * just go back and repeat.
2321  		 */
2322  		rq = task_rq_lock(p, &rf);
2323  		trace_sched_wait_task(p);
2324  		running = task_on_cpu(rq, p);
2325  		queued = task_on_rq_queued(p);
2326  		ncsw = 0;
2327  		if ((match = __task_state_match(p, match_state))) {
2328  			/*
2329  			 * When matching on p->saved_state, consider this task
2330  			 * still queued so it will wait.
2331  			 */
2332  			if (match < 0)
2333  				queued = 1;
2334  			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2335  		}
2336  		task_rq_unlock(rq, p, &rf);
2337  
2338  		/*
2339  		 * If it changed from the expected state, bail out now.
2340  		 */
2341  		if (unlikely(!ncsw))
2342  			break;
2343  
2344  		/*
2345  		 * Was it really running after all now that we
2346  		 * checked with the proper locks actually held?
2347  		 *
2348  		 * Oops. Go back and try again..
2349  		 */
2350  		if (unlikely(running)) {
2351  			cpu_relax();
2352  			continue;
2353  		}
2354  
2355  		/*
2356  		 * It's not enough that it's not actively running,
2357  		 * it must be off the runqueue _entirely_, and not
2358  		 * preempted!
2359  		 *
2360  		 * So if it was still runnable (but just not actively
2361  		 * running right now), it's preempted, and we should
2362  		 * yield - it could be a while.
2363  		 */
2364  		if (unlikely(queued)) {
2365  			ktime_t to = NSEC_PER_SEC / HZ;
2366  
2367  			set_current_state(TASK_UNINTERRUPTIBLE);
2368  			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2369  			continue;
2370  		}
2371  
2372  		/*
2373  		 * Ahh, all good. It wasn't running, and it wasn't
2374  		 * runnable, which means that it will never become
2375  		 * running in the future either. We're all done!
2376  		 */
2377  		break;
2378  	}
2379  
2380  	return ncsw;
2381  }
2382  
2383  #ifdef CONFIG_SMP
2384  
2385  static void
2386  __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2387  
2388  static int __set_cpus_allowed_ptr(struct task_struct *p,
2389  				  struct affinity_context *ctx);
2390  
migrate_disable_switch(struct rq * rq,struct task_struct * p)2391  static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2392  {
2393  	struct affinity_context ac = {
2394  		.new_mask  = cpumask_of(rq->cpu),
2395  		.flags     = SCA_MIGRATE_DISABLE,
2396  	};
2397  
2398  	if (likely(!p->migration_disabled))
2399  		return;
2400  
2401  	if (p->cpus_ptr != &p->cpus_mask)
2402  		return;
2403  
2404  	/*
2405  	 * Violates locking rules! see comment in __do_set_cpus_allowed().
2406  	 */
2407  	__do_set_cpus_allowed(p, &ac);
2408  }
2409  
migrate_disable(void)2410  void migrate_disable(void)
2411  {
2412  	struct task_struct *p = current;
2413  
2414  	if (p->migration_disabled) {
2415  		p->migration_disabled++;
2416  		return;
2417  	}
2418  
2419  	preempt_disable();
2420  	this_rq()->nr_pinned++;
2421  	p->migration_disabled = 1;
2422  	preempt_enable();
2423  }
2424  EXPORT_SYMBOL_GPL(migrate_disable);
2425  
migrate_enable(void)2426  void migrate_enable(void)
2427  {
2428  	struct task_struct *p = current;
2429  	struct affinity_context ac = {
2430  		.new_mask  = &p->cpus_mask,
2431  		.flags     = SCA_MIGRATE_ENABLE,
2432  	};
2433  
2434  	if (p->migration_disabled > 1) {
2435  		p->migration_disabled--;
2436  		return;
2437  	}
2438  
2439  	if (WARN_ON_ONCE(!p->migration_disabled))
2440  		return;
2441  
2442  	/*
2443  	 * Ensure stop_task runs either before or after this, and that
2444  	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2445  	 */
2446  	preempt_disable();
2447  	if (p->cpus_ptr != &p->cpus_mask)
2448  		__set_cpus_allowed_ptr(p, &ac);
2449  	/*
2450  	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2451  	 * regular cpus_mask, otherwise things that race (eg.
2452  	 * select_fallback_rq) get confused.
2453  	 */
2454  	barrier();
2455  	p->migration_disabled = 0;
2456  	this_rq()->nr_pinned--;
2457  	preempt_enable();
2458  }
2459  EXPORT_SYMBOL_GPL(migrate_enable);
2460  
rq_has_pinned_tasks(struct rq * rq)2461  static inline bool rq_has_pinned_tasks(struct rq *rq)
2462  {
2463  	return rq->nr_pinned;
2464  }
2465  
2466  /*
2467   * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2468   * __set_cpus_allowed_ptr() and select_fallback_rq().
2469   */
is_cpu_allowed(struct task_struct * p,int cpu)2470  static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2471  {
2472  	/* When not in the task's cpumask, no point in looking further. */
2473  	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2474  		return false;
2475  
2476  	/* migrate_disabled() must be allowed to finish. */
2477  	if (is_migration_disabled(p))
2478  		return cpu_online(cpu);
2479  
2480  	/* Non kernel threads are not allowed during either online or offline. */
2481  	if (!(p->flags & PF_KTHREAD))
2482  		return cpu_active(cpu) && task_cpu_possible(cpu, p);
2483  
2484  	/* KTHREAD_IS_PER_CPU is always allowed. */
2485  	if (kthread_is_per_cpu(p))
2486  		return cpu_online(cpu);
2487  
2488  	/* Regular kernel threads don't get to stay during offline. */
2489  	if (cpu_dying(cpu))
2490  		return false;
2491  
2492  	/* But are allowed during online. */
2493  	return cpu_online(cpu);
2494  }
2495  
2496  /*
2497   * This is how migration works:
2498   *
2499   * 1) we invoke migration_cpu_stop() on the target CPU using
2500   *    stop_one_cpu().
2501   * 2) stopper starts to run (implicitly forcing the migrated thread
2502   *    off the CPU)
2503   * 3) it checks whether the migrated task is still in the wrong runqueue.
2504   * 4) if it's in the wrong runqueue then the migration thread removes
2505   *    it and puts it into the right queue.
2506   * 5) stopper completes and stop_one_cpu() returns and the migration
2507   *    is done.
2508   */
2509  
2510  /*
2511   * move_queued_task - move a queued task to new rq.
2512   *
2513   * Returns (locked) new rq. Old rq's lock is released.
2514   */
move_queued_task(struct rq * rq,struct rq_flags * rf,struct task_struct * p,int new_cpu)2515  static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2516  				   struct task_struct *p, int new_cpu)
2517  {
2518  	lockdep_assert_rq_held(rq);
2519  
2520  	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2521  	set_task_cpu(p, new_cpu);
2522  	rq_unlock(rq, rf);
2523  
2524  	rq = cpu_rq(new_cpu);
2525  
2526  	rq_lock(rq, rf);
2527  	WARN_ON_ONCE(task_cpu(p) != new_cpu);
2528  	activate_task(rq, p, 0);
2529  	wakeup_preempt(rq, p, 0);
2530  
2531  	return rq;
2532  }
2533  
2534  struct migration_arg {
2535  	struct task_struct		*task;
2536  	int				dest_cpu;
2537  	struct set_affinity_pending	*pending;
2538  };
2539  
2540  /*
2541   * @refs: number of wait_for_completion()
2542   * @stop_pending: is @stop_work in use
2543   */
2544  struct set_affinity_pending {
2545  	refcount_t		refs;
2546  	unsigned int		stop_pending;
2547  	struct completion	done;
2548  	struct cpu_stop_work	stop_work;
2549  	struct migration_arg	arg;
2550  };
2551  
2552  /*
2553   * Move (not current) task off this CPU, onto the destination CPU. We're doing
2554   * this because either it can't run here any more (set_cpus_allowed()
2555   * away from this CPU, or CPU going down), or because we're
2556   * attempting to rebalance this task on exec (sched_exec).
2557   *
2558   * So we race with normal scheduler movements, but that's OK, as long
2559   * as the task is no longer on this CPU.
2560   */
__migrate_task(struct rq * rq,struct rq_flags * rf,struct task_struct * p,int dest_cpu)2561  static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2562  				 struct task_struct *p, int dest_cpu)
2563  {
2564  	/* Affinity changed (again). */
2565  	if (!is_cpu_allowed(p, dest_cpu))
2566  		return rq;
2567  
2568  	rq = move_queued_task(rq, rf, p, dest_cpu);
2569  
2570  	return rq;
2571  }
2572  
2573  /*
2574   * migration_cpu_stop - this will be executed by a highprio stopper thread
2575   * and performs thread migration by bumping thread off CPU then
2576   * 'pushing' onto another runqueue.
2577   */
migration_cpu_stop(void * data)2578  static int migration_cpu_stop(void *data)
2579  {
2580  	struct migration_arg *arg = data;
2581  	struct set_affinity_pending *pending = arg->pending;
2582  	struct task_struct *p = arg->task;
2583  	struct rq *rq = this_rq();
2584  	bool complete = false;
2585  	struct rq_flags rf;
2586  
2587  	/*
2588  	 * The original target CPU might have gone down and we might
2589  	 * be on another CPU but it doesn't matter.
2590  	 */
2591  	local_irq_save(rf.flags);
2592  	/*
2593  	 * We need to explicitly wake pending tasks before running
2594  	 * __migrate_task() such that we will not miss enforcing cpus_ptr
2595  	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2596  	 */
2597  	flush_smp_call_function_queue();
2598  
2599  	raw_spin_lock(&p->pi_lock);
2600  	rq_lock(rq, &rf);
2601  
2602  	/*
2603  	 * If we were passed a pending, then ->stop_pending was set, thus
2604  	 * p->migration_pending must have remained stable.
2605  	 */
2606  	WARN_ON_ONCE(pending && pending != p->migration_pending);
2607  
2608  	/*
2609  	 * If task_rq(p) != rq, it cannot be migrated here, because we're
2610  	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2611  	 * we're holding p->pi_lock.
2612  	 */
2613  	if (task_rq(p) == rq) {
2614  		if (is_migration_disabled(p))
2615  			goto out;
2616  
2617  		if (pending) {
2618  			p->migration_pending = NULL;
2619  			complete = true;
2620  
2621  			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2622  				goto out;
2623  		}
2624  
2625  		if (task_on_rq_queued(p)) {
2626  			update_rq_clock(rq);
2627  			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2628  		} else {
2629  			p->wake_cpu = arg->dest_cpu;
2630  		}
2631  
2632  		/*
2633  		 * XXX __migrate_task() can fail, at which point we might end
2634  		 * up running on a dodgy CPU, AFAICT this can only happen
2635  		 * during CPU hotplug, at which point we'll get pushed out
2636  		 * anyway, so it's probably not a big deal.
2637  		 */
2638  
2639  	} else if (pending) {
2640  		/*
2641  		 * This happens when we get migrated between migrate_enable()'s
2642  		 * preempt_enable() and scheduling the stopper task. At that
2643  		 * point we're a regular task again and not current anymore.
2644  		 *
2645  		 * A !PREEMPT kernel has a giant hole here, which makes it far
2646  		 * more likely.
2647  		 */
2648  
2649  		/*
2650  		 * The task moved before the stopper got to run. We're holding
2651  		 * ->pi_lock, so the allowed mask is stable - if it got
2652  		 * somewhere allowed, we're done.
2653  		 */
2654  		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2655  			p->migration_pending = NULL;
2656  			complete = true;
2657  			goto out;
2658  		}
2659  
2660  		/*
2661  		 * When migrate_enable() hits a rq mis-match we can't reliably
2662  		 * determine is_migration_disabled() and so have to chase after
2663  		 * it.
2664  		 */
2665  		WARN_ON_ONCE(!pending->stop_pending);
2666  		preempt_disable();
2667  		task_rq_unlock(rq, p, &rf);
2668  		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2669  				    &pending->arg, &pending->stop_work);
2670  		preempt_enable();
2671  		return 0;
2672  	}
2673  out:
2674  	if (pending)
2675  		pending->stop_pending = false;
2676  	task_rq_unlock(rq, p, &rf);
2677  
2678  	if (complete)
2679  		complete_all(&pending->done);
2680  
2681  	return 0;
2682  }
2683  
push_cpu_stop(void * arg)2684  int push_cpu_stop(void *arg)
2685  {
2686  	struct rq *lowest_rq = NULL, *rq = this_rq();
2687  	struct task_struct *p = arg;
2688  
2689  	raw_spin_lock_irq(&p->pi_lock);
2690  	raw_spin_rq_lock(rq);
2691  
2692  	if (task_rq(p) != rq)
2693  		goto out_unlock;
2694  
2695  	if (is_migration_disabled(p)) {
2696  		p->migration_flags |= MDF_PUSH;
2697  		goto out_unlock;
2698  	}
2699  
2700  	p->migration_flags &= ~MDF_PUSH;
2701  
2702  	if (p->sched_class->find_lock_rq)
2703  		lowest_rq = p->sched_class->find_lock_rq(p, rq);
2704  
2705  	if (!lowest_rq)
2706  		goto out_unlock;
2707  
2708  	// XXX validate p is still the highest prio task
2709  	if (task_rq(p) == rq) {
2710  		deactivate_task(rq, p, 0);
2711  		set_task_cpu(p, lowest_rq->cpu);
2712  		activate_task(lowest_rq, p, 0);
2713  		resched_curr(lowest_rq);
2714  	}
2715  
2716  	double_unlock_balance(rq, lowest_rq);
2717  
2718  out_unlock:
2719  	rq->push_busy = false;
2720  	raw_spin_rq_unlock(rq);
2721  	raw_spin_unlock_irq(&p->pi_lock);
2722  
2723  	put_task_struct(p);
2724  	return 0;
2725  }
2726  
2727  /*
2728   * sched_class::set_cpus_allowed must do the below, but is not required to
2729   * actually call this function.
2730   */
set_cpus_allowed_common(struct task_struct * p,struct affinity_context * ctx)2731  void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2732  {
2733  	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2734  		p->cpus_ptr = ctx->new_mask;
2735  		return;
2736  	}
2737  
2738  	cpumask_copy(&p->cpus_mask, ctx->new_mask);
2739  	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2740  
2741  	/*
2742  	 * Swap in a new user_cpus_ptr if SCA_USER flag set
2743  	 */
2744  	if (ctx->flags & SCA_USER)
2745  		swap(p->user_cpus_ptr, ctx->user_mask);
2746  }
2747  
2748  static void
__do_set_cpus_allowed(struct task_struct * p,struct affinity_context * ctx)2749  __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2750  {
2751  	struct rq *rq = task_rq(p);
2752  	bool queued, running;
2753  
2754  	/*
2755  	 * This here violates the locking rules for affinity, since we're only
2756  	 * supposed to change these variables while holding both rq->lock and
2757  	 * p->pi_lock.
2758  	 *
2759  	 * HOWEVER, it magically works, because ttwu() is the only code that
2760  	 * accesses these variables under p->pi_lock and only does so after
2761  	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2762  	 * before finish_task().
2763  	 *
2764  	 * XXX do further audits, this smells like something putrid.
2765  	 */
2766  	if (ctx->flags & SCA_MIGRATE_DISABLE)
2767  		SCHED_WARN_ON(!p->on_cpu);
2768  	else
2769  		lockdep_assert_held(&p->pi_lock);
2770  
2771  	queued = task_on_rq_queued(p);
2772  	running = task_current(rq, p);
2773  
2774  	if (queued) {
2775  		/*
2776  		 * Because __kthread_bind() calls this on blocked tasks without
2777  		 * holding rq->lock.
2778  		 */
2779  		lockdep_assert_rq_held(rq);
2780  		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2781  	}
2782  	if (running)
2783  		put_prev_task(rq, p);
2784  
2785  	p->sched_class->set_cpus_allowed(p, ctx);
2786  
2787  	if (queued)
2788  		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2789  	if (running)
2790  		set_next_task(rq, p);
2791  }
2792  
2793  /*
2794   * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2795   * affinity (if any) should be destroyed too.
2796   */
do_set_cpus_allowed(struct task_struct * p,const struct cpumask * new_mask)2797  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2798  {
2799  	struct affinity_context ac = {
2800  		.new_mask  = new_mask,
2801  		.user_mask = NULL,
2802  		.flags     = SCA_USER,	/* clear the user requested mask */
2803  	};
2804  	union cpumask_rcuhead {
2805  		cpumask_t cpumask;
2806  		struct rcu_head rcu;
2807  	};
2808  
2809  	__do_set_cpus_allowed(p, &ac);
2810  
2811  	/*
2812  	 * Because this is called with p->pi_lock held, it is not possible
2813  	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2814  	 * kfree_rcu().
2815  	 */
2816  	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2817  }
2818  
alloc_user_cpus_ptr(int node)2819  static cpumask_t *alloc_user_cpus_ptr(int node)
2820  {
2821  	/*
2822  	 * See do_set_cpus_allowed() above for the rcu_head usage.
2823  	 */
2824  	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
2825  
2826  	return kmalloc_node(size, GFP_KERNEL, node);
2827  }
2828  
dup_user_cpus_ptr(struct task_struct * dst,struct task_struct * src,int node)2829  int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2830  		      int node)
2831  {
2832  	cpumask_t *user_mask;
2833  	unsigned long flags;
2834  
2835  	/*
2836  	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2837  	 * may differ by now due to racing.
2838  	 */
2839  	dst->user_cpus_ptr = NULL;
2840  
2841  	/*
2842  	 * This check is racy and losing the race is a valid situation.
2843  	 * It is not worth the extra overhead of taking the pi_lock on
2844  	 * every fork/clone.
2845  	 */
2846  	if (data_race(!src->user_cpus_ptr))
2847  		return 0;
2848  
2849  	user_mask = alloc_user_cpus_ptr(node);
2850  	if (!user_mask)
2851  		return -ENOMEM;
2852  
2853  	/*
2854  	 * Use pi_lock to protect content of user_cpus_ptr
2855  	 *
2856  	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2857  	 * do_set_cpus_allowed().
2858  	 */
2859  	raw_spin_lock_irqsave(&src->pi_lock, flags);
2860  	if (src->user_cpus_ptr) {
2861  		swap(dst->user_cpus_ptr, user_mask);
2862  		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2863  	}
2864  	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2865  
2866  	if (unlikely(user_mask))
2867  		kfree(user_mask);
2868  
2869  	return 0;
2870  }
2871  
clear_user_cpus_ptr(struct task_struct * p)2872  static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2873  {
2874  	struct cpumask *user_mask = NULL;
2875  
2876  	swap(p->user_cpus_ptr, user_mask);
2877  
2878  	return user_mask;
2879  }
2880  
release_user_cpus_ptr(struct task_struct * p)2881  void release_user_cpus_ptr(struct task_struct *p)
2882  {
2883  	kfree(clear_user_cpus_ptr(p));
2884  }
2885  
2886  /*
2887   * This function is wildly self concurrent; here be dragons.
2888   *
2889   *
2890   * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2891   * designated task is enqueued on an allowed CPU. If that task is currently
2892   * running, we have to kick it out using the CPU stopper.
2893   *
2894   * Migrate-Disable comes along and tramples all over our nice sandcastle.
2895   * Consider:
2896   *
2897   *     Initial conditions: P0->cpus_mask = [0, 1]
2898   *
2899   *     P0@CPU0                  P1
2900   *
2901   *     migrate_disable();
2902   *     <preempted>
2903   *                              set_cpus_allowed_ptr(P0, [1]);
2904   *
2905   * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2906   * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2907   * This means we need the following scheme:
2908   *
2909   *     P0@CPU0                  P1
2910   *
2911   *     migrate_disable();
2912   *     <preempted>
2913   *                              set_cpus_allowed_ptr(P0, [1]);
2914   *                                <blocks>
2915   *     <resumes>
2916   *     migrate_enable();
2917   *       __set_cpus_allowed_ptr();
2918   *       <wakes local stopper>
2919   *                         `--> <woken on migration completion>
2920   *
2921   * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2922   * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2923   * task p are serialized by p->pi_lock, which we can leverage: the one that
2924   * should come into effect at the end of the Migrate-Disable region is the last
2925   * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2926   * but we still need to properly signal those waiting tasks at the appropriate
2927   * moment.
2928   *
2929   * This is implemented using struct set_affinity_pending. The first
2930   * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2931   * setup an instance of that struct and install it on the targeted task_struct.
2932   * Any and all further callers will reuse that instance. Those then wait for
2933   * a completion signaled at the tail of the CPU stopper callback (1), triggered
2934   * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2935   *
2936   *
2937   * (1) In the cases covered above. There is one more where the completion is
2938   * signaled within affine_move_task() itself: when a subsequent affinity request
2939   * occurs after the stopper bailed out due to the targeted task still being
2940   * Migrate-Disable. Consider:
2941   *
2942   *     Initial conditions: P0->cpus_mask = [0, 1]
2943   *
2944   *     CPU0		  P1				P2
2945   *     <P0>
2946   *       migrate_disable();
2947   *       <preempted>
2948   *                        set_cpus_allowed_ptr(P0, [1]);
2949   *                          <blocks>
2950   *     <migration/0>
2951   *       migration_cpu_stop()
2952   *         is_migration_disabled()
2953   *           <bails>
2954   *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2955   *                                                         <signal completion>
2956   *                          <awakes>
2957   *
2958   * Note that the above is safe vs a concurrent migrate_enable(), as any
2959   * pending affinity completion is preceded by an uninstallation of
2960   * p->migration_pending done with p->pi_lock held.
2961   */
affine_move_task(struct rq * rq,struct task_struct * p,struct rq_flags * rf,int dest_cpu,unsigned int flags)2962  static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2963  			    int dest_cpu, unsigned int flags)
2964  	__releases(rq->lock)
2965  	__releases(p->pi_lock)
2966  {
2967  	struct set_affinity_pending my_pending = { }, *pending = NULL;
2968  	bool stop_pending, complete = false;
2969  
2970  	/* Can the task run on the task's current CPU? If so, we're done */
2971  	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2972  		struct task_struct *push_task = NULL;
2973  
2974  		if ((flags & SCA_MIGRATE_ENABLE) &&
2975  		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2976  			rq->push_busy = true;
2977  			push_task = get_task_struct(p);
2978  		}
2979  
2980  		/*
2981  		 * If there are pending waiters, but no pending stop_work,
2982  		 * then complete now.
2983  		 */
2984  		pending = p->migration_pending;
2985  		if (pending && !pending->stop_pending) {
2986  			p->migration_pending = NULL;
2987  			complete = true;
2988  		}
2989  
2990  		preempt_disable();
2991  		task_rq_unlock(rq, p, rf);
2992  		if (push_task) {
2993  			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2994  					    p, &rq->push_work);
2995  		}
2996  		preempt_enable();
2997  
2998  		if (complete)
2999  			complete_all(&pending->done);
3000  
3001  		return 0;
3002  	}
3003  
3004  	if (!(flags & SCA_MIGRATE_ENABLE)) {
3005  		/* serialized by p->pi_lock */
3006  		if (!p->migration_pending) {
3007  			/* Install the request */
3008  			refcount_set(&my_pending.refs, 1);
3009  			init_completion(&my_pending.done);
3010  			my_pending.arg = (struct migration_arg) {
3011  				.task = p,
3012  				.dest_cpu = dest_cpu,
3013  				.pending = &my_pending,
3014  			};
3015  
3016  			p->migration_pending = &my_pending;
3017  		} else {
3018  			pending = p->migration_pending;
3019  			refcount_inc(&pending->refs);
3020  			/*
3021  			 * Affinity has changed, but we've already installed a
3022  			 * pending. migration_cpu_stop() *must* see this, else
3023  			 * we risk a completion of the pending despite having a
3024  			 * task on a disallowed CPU.
3025  			 *
3026  			 * Serialized by p->pi_lock, so this is safe.
3027  			 */
3028  			pending->arg.dest_cpu = dest_cpu;
3029  		}
3030  	}
3031  	pending = p->migration_pending;
3032  	/*
3033  	 * - !MIGRATE_ENABLE:
3034  	 *   we'll have installed a pending if there wasn't one already.
3035  	 *
3036  	 * - MIGRATE_ENABLE:
3037  	 *   we're here because the current CPU isn't matching anymore,
3038  	 *   the only way that can happen is because of a concurrent
3039  	 *   set_cpus_allowed_ptr() call, which should then still be
3040  	 *   pending completion.
3041  	 *
3042  	 * Either way, we really should have a @pending here.
3043  	 */
3044  	if (WARN_ON_ONCE(!pending)) {
3045  		task_rq_unlock(rq, p, rf);
3046  		return -EINVAL;
3047  	}
3048  
3049  	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
3050  		/*
3051  		 * MIGRATE_ENABLE gets here because 'p == current', but for
3052  		 * anything else we cannot do is_migration_disabled(), punt
3053  		 * and have the stopper function handle it all race-free.
3054  		 */
3055  		stop_pending = pending->stop_pending;
3056  		if (!stop_pending)
3057  			pending->stop_pending = true;
3058  
3059  		if (flags & SCA_MIGRATE_ENABLE)
3060  			p->migration_flags &= ~MDF_PUSH;
3061  
3062  		preempt_disable();
3063  		task_rq_unlock(rq, p, rf);
3064  		if (!stop_pending) {
3065  			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3066  					    &pending->arg, &pending->stop_work);
3067  		}
3068  		preempt_enable();
3069  
3070  		if (flags & SCA_MIGRATE_ENABLE)
3071  			return 0;
3072  	} else {
3073  
3074  		if (!is_migration_disabled(p)) {
3075  			if (task_on_rq_queued(p))
3076  				rq = move_queued_task(rq, rf, p, dest_cpu);
3077  
3078  			if (!pending->stop_pending) {
3079  				p->migration_pending = NULL;
3080  				complete = true;
3081  			}
3082  		}
3083  		task_rq_unlock(rq, p, rf);
3084  
3085  		if (complete)
3086  			complete_all(&pending->done);
3087  	}
3088  
3089  	wait_for_completion(&pending->done);
3090  
3091  	if (refcount_dec_and_test(&pending->refs))
3092  		wake_up_var(&pending->refs); /* No UaF, just an address */
3093  
3094  	/*
3095  	 * Block the original owner of &pending until all subsequent callers
3096  	 * have seen the completion and decremented the refcount
3097  	 */
3098  	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3099  
3100  	/* ARGH */
3101  	WARN_ON_ONCE(my_pending.stop_pending);
3102  
3103  	return 0;
3104  }
3105  
3106  /*
3107   * Called with both p->pi_lock and rq->lock held; drops both before returning.
3108   */
__set_cpus_allowed_ptr_locked(struct task_struct * p,struct affinity_context * ctx,struct rq * rq,struct rq_flags * rf)3109  static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3110  					 struct affinity_context *ctx,
3111  					 struct rq *rq,
3112  					 struct rq_flags *rf)
3113  	__releases(rq->lock)
3114  	__releases(p->pi_lock)
3115  {
3116  	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3117  	const struct cpumask *cpu_valid_mask = cpu_active_mask;
3118  	bool kthread = p->flags & PF_KTHREAD;
3119  	unsigned int dest_cpu;
3120  	int ret = 0;
3121  
3122  	update_rq_clock(rq);
3123  
3124  	if (kthread || is_migration_disabled(p)) {
3125  		/*
3126  		 * Kernel threads are allowed on online && !active CPUs,
3127  		 * however, during cpu-hot-unplug, even these might get pushed
3128  		 * away if not KTHREAD_IS_PER_CPU.
3129  		 *
3130  		 * Specifically, migration_disabled() tasks must not fail the
3131  		 * cpumask_any_and_distribute() pick below, esp. so on
3132  		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3133  		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3134  		 */
3135  		cpu_valid_mask = cpu_online_mask;
3136  	}
3137  
3138  	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3139  		ret = -EINVAL;
3140  		goto out;
3141  	}
3142  
3143  	/*
3144  	 * Must re-check here, to close a race against __kthread_bind(),
3145  	 * sched_setaffinity() is not guaranteed to observe the flag.
3146  	 */
3147  	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3148  		ret = -EINVAL;
3149  		goto out;
3150  	}
3151  
3152  	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3153  		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3154  			if (ctx->flags & SCA_USER)
3155  				swap(p->user_cpus_ptr, ctx->user_mask);
3156  			goto out;
3157  		}
3158  
3159  		if (WARN_ON_ONCE(p == current &&
3160  				 is_migration_disabled(p) &&
3161  				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3162  			ret = -EBUSY;
3163  			goto out;
3164  		}
3165  	}
3166  
3167  	/*
3168  	 * Picking a ~random cpu helps in cases where we are changing affinity
3169  	 * for groups of tasks (ie. cpuset), so that load balancing is not
3170  	 * immediately required to distribute the tasks within their new mask.
3171  	 */
3172  	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3173  	if (dest_cpu >= nr_cpu_ids) {
3174  		ret = -EINVAL;
3175  		goto out;
3176  	}
3177  
3178  	__do_set_cpus_allowed(p, ctx);
3179  
3180  	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3181  
3182  out:
3183  	task_rq_unlock(rq, p, rf);
3184  
3185  	return ret;
3186  }
3187  
3188  /*
3189   * Change a given task's CPU affinity. Migrate the thread to a
3190   * proper CPU and schedule it away if the CPU it's executing on
3191   * is removed from the allowed bitmask.
3192   *
3193   * NOTE: the caller must have a valid reference to the task, the
3194   * task must not exit() & deallocate itself prematurely. The
3195   * call is not atomic; no spinlocks may be held.
3196   */
__set_cpus_allowed_ptr(struct task_struct * p,struct affinity_context * ctx)3197  static int __set_cpus_allowed_ptr(struct task_struct *p,
3198  				  struct affinity_context *ctx)
3199  {
3200  	struct rq_flags rf;
3201  	struct rq *rq;
3202  
3203  	rq = task_rq_lock(p, &rf);
3204  	/*
3205  	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3206  	 * flags are set.
3207  	 */
3208  	if (p->user_cpus_ptr &&
3209  	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3210  	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3211  		ctx->new_mask = rq->scratch_mask;
3212  
3213  	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3214  }
3215  
set_cpus_allowed_ptr(struct task_struct * p,const struct cpumask * new_mask)3216  int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3217  {
3218  	struct affinity_context ac = {
3219  		.new_mask  = new_mask,
3220  		.flags     = 0,
3221  	};
3222  
3223  	return __set_cpus_allowed_ptr(p, &ac);
3224  }
3225  EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
3226  
3227  /*
3228   * Change a given task's CPU affinity to the intersection of its current
3229   * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3230   * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3231   * affinity or use cpu_online_mask instead.
3232   *
3233   * If the resulting mask is empty, leave the affinity unchanged and return
3234   * -EINVAL.
3235   */
restrict_cpus_allowed_ptr(struct task_struct * p,struct cpumask * new_mask,const struct cpumask * subset_mask)3236  static int restrict_cpus_allowed_ptr(struct task_struct *p,
3237  				     struct cpumask *new_mask,
3238  				     const struct cpumask *subset_mask)
3239  {
3240  	struct affinity_context ac = {
3241  		.new_mask  = new_mask,
3242  		.flags     = 0,
3243  	};
3244  	struct rq_flags rf;
3245  	struct rq *rq;
3246  	int err;
3247  
3248  	rq = task_rq_lock(p, &rf);
3249  
3250  	/*
3251  	 * Forcefully restricting the affinity of a deadline task is
3252  	 * likely to cause problems, so fail and noisily override the
3253  	 * mask entirely.
3254  	 */
3255  	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3256  		err = -EPERM;
3257  		goto err_unlock;
3258  	}
3259  
3260  	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3261  		err = -EINVAL;
3262  		goto err_unlock;
3263  	}
3264  
3265  	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3266  
3267  err_unlock:
3268  	task_rq_unlock(rq, p, &rf);
3269  	return err;
3270  }
3271  
3272  /*
3273   * Restrict the CPU affinity of task @p so that it is a subset of
3274   * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3275   * old affinity mask. If the resulting mask is empty, we warn and walk
3276   * up the cpuset hierarchy until we find a suitable mask.
3277   */
force_compatible_cpus_allowed_ptr(struct task_struct * p)3278  void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3279  {
3280  	cpumask_var_t new_mask;
3281  	const struct cpumask *override_mask = task_cpu_possible_mask(p);
3282  
3283  	alloc_cpumask_var(&new_mask, GFP_KERNEL);
3284  
3285  	/*
3286  	 * __migrate_task() can fail silently in the face of concurrent
3287  	 * offlining of the chosen destination CPU, so take the hotplug
3288  	 * lock to ensure that the migration succeeds.
3289  	 */
3290  	cpus_read_lock();
3291  	if (!cpumask_available(new_mask))
3292  		goto out_set_mask;
3293  
3294  	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3295  		goto out_free_mask;
3296  
3297  	/*
3298  	 * We failed to find a valid subset of the affinity mask for the
3299  	 * task, so override it based on its cpuset hierarchy.
3300  	 */
3301  	cpuset_cpus_allowed(p, new_mask);
3302  	override_mask = new_mask;
3303  
3304  out_set_mask:
3305  	if (printk_ratelimit()) {
3306  		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3307  				task_pid_nr(p), p->comm,
3308  				cpumask_pr_args(override_mask));
3309  	}
3310  
3311  	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3312  out_free_mask:
3313  	cpus_read_unlock();
3314  	free_cpumask_var(new_mask);
3315  }
3316  
3317  static int
3318  __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
3319  
3320  /*
3321   * Restore the affinity of a task @p which was previously restricted by a
3322   * call to force_compatible_cpus_allowed_ptr().
3323   *
3324   * It is the caller's responsibility to serialise this with any calls to
3325   * force_compatible_cpus_allowed_ptr(@p).
3326   */
relax_compatible_cpus_allowed_ptr(struct task_struct * p)3327  void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3328  {
3329  	struct affinity_context ac = {
3330  		.new_mask  = task_user_cpus(p),
3331  		.flags     = 0,
3332  	};
3333  	int ret;
3334  
3335  	/*
3336  	 * Try to restore the old affinity mask with __sched_setaffinity().
3337  	 * Cpuset masking will be done there too.
3338  	 */
3339  	ret = __sched_setaffinity(p, &ac);
3340  	WARN_ON_ONCE(ret);
3341  }
3342  
set_task_cpu(struct task_struct * p,unsigned int new_cpu)3343  void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3344  {
3345  #ifdef CONFIG_SCHED_DEBUG
3346  	unsigned int state = READ_ONCE(p->__state);
3347  
3348  	/*
3349  	 * We should never call set_task_cpu() on a blocked task,
3350  	 * ttwu() will sort out the placement.
3351  	 */
3352  	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3353  
3354  	/*
3355  	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3356  	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3357  	 * time relying on p->on_rq.
3358  	 */
3359  	WARN_ON_ONCE(state == TASK_RUNNING &&
3360  		     p->sched_class == &fair_sched_class &&
3361  		     (p->on_rq && !task_on_rq_migrating(p)));
3362  
3363  #ifdef CONFIG_LOCKDEP
3364  	/*
3365  	 * The caller should hold either p->pi_lock or rq->lock, when changing
3366  	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3367  	 *
3368  	 * sched_move_task() holds both and thus holding either pins the cgroup,
3369  	 * see task_group().
3370  	 *
3371  	 * Furthermore, all task_rq users should acquire both locks, see
3372  	 * task_rq_lock().
3373  	 */
3374  	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3375  				      lockdep_is_held(__rq_lockp(task_rq(p)))));
3376  #endif
3377  	/*
3378  	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3379  	 */
3380  	WARN_ON_ONCE(!cpu_online(new_cpu));
3381  
3382  	WARN_ON_ONCE(is_migration_disabled(p));
3383  #endif
3384  
3385  	trace_sched_migrate_task(p, new_cpu);
3386  
3387  	if (task_cpu(p) != new_cpu) {
3388  		if (p->sched_class->migrate_task_rq)
3389  			p->sched_class->migrate_task_rq(p, new_cpu);
3390  		p->se.nr_migrations++;
3391  		rseq_migrate(p);
3392  		sched_mm_cid_migrate_from(p);
3393  		perf_event_task_migrate(p);
3394  	}
3395  
3396  	__set_task_cpu(p, new_cpu);
3397  }
3398  
3399  #ifdef CONFIG_NUMA_BALANCING
__migrate_swap_task(struct task_struct * p,int cpu)3400  static void __migrate_swap_task(struct task_struct *p, int cpu)
3401  {
3402  	if (task_on_rq_queued(p)) {
3403  		struct rq *src_rq, *dst_rq;
3404  		struct rq_flags srf, drf;
3405  
3406  		src_rq = task_rq(p);
3407  		dst_rq = cpu_rq(cpu);
3408  
3409  		rq_pin_lock(src_rq, &srf);
3410  		rq_pin_lock(dst_rq, &drf);
3411  
3412  		deactivate_task(src_rq, p, 0);
3413  		set_task_cpu(p, cpu);
3414  		activate_task(dst_rq, p, 0);
3415  		wakeup_preempt(dst_rq, p, 0);
3416  
3417  		rq_unpin_lock(dst_rq, &drf);
3418  		rq_unpin_lock(src_rq, &srf);
3419  
3420  	} else {
3421  		/*
3422  		 * Task isn't running anymore; make it appear like we migrated
3423  		 * it before it went to sleep. This means on wakeup we make the
3424  		 * previous CPU our target instead of where it really is.
3425  		 */
3426  		p->wake_cpu = cpu;
3427  	}
3428  }
3429  
3430  struct migration_swap_arg {
3431  	struct task_struct *src_task, *dst_task;
3432  	int src_cpu, dst_cpu;
3433  };
3434  
migrate_swap_stop(void * data)3435  static int migrate_swap_stop(void *data)
3436  {
3437  	struct migration_swap_arg *arg = data;
3438  	struct rq *src_rq, *dst_rq;
3439  
3440  	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3441  		return -EAGAIN;
3442  
3443  	src_rq = cpu_rq(arg->src_cpu);
3444  	dst_rq = cpu_rq(arg->dst_cpu);
3445  
3446  	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3447  	guard(double_rq_lock)(src_rq, dst_rq);
3448  
3449  	if (task_cpu(arg->dst_task) != arg->dst_cpu)
3450  		return -EAGAIN;
3451  
3452  	if (task_cpu(arg->src_task) != arg->src_cpu)
3453  		return -EAGAIN;
3454  
3455  	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3456  		return -EAGAIN;
3457  
3458  	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3459  		return -EAGAIN;
3460  
3461  	__migrate_swap_task(arg->src_task, arg->dst_cpu);
3462  	__migrate_swap_task(arg->dst_task, arg->src_cpu);
3463  
3464  	return 0;
3465  }
3466  
3467  /*
3468   * Cross migrate two tasks
3469   */
migrate_swap(struct task_struct * cur,struct task_struct * p,int target_cpu,int curr_cpu)3470  int migrate_swap(struct task_struct *cur, struct task_struct *p,
3471  		int target_cpu, int curr_cpu)
3472  {
3473  	struct migration_swap_arg arg;
3474  	int ret = -EINVAL;
3475  
3476  	arg = (struct migration_swap_arg){
3477  		.src_task = cur,
3478  		.src_cpu = curr_cpu,
3479  		.dst_task = p,
3480  		.dst_cpu = target_cpu,
3481  	};
3482  
3483  	if (arg.src_cpu == arg.dst_cpu)
3484  		goto out;
3485  
3486  	/*
3487  	 * These three tests are all lockless; this is OK since all of them
3488  	 * will be re-checked with proper locks held further down the line.
3489  	 */
3490  	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3491  		goto out;
3492  
3493  	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3494  		goto out;
3495  
3496  	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3497  		goto out;
3498  
3499  	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3500  	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3501  
3502  out:
3503  	return ret;
3504  }
3505  #endif /* CONFIG_NUMA_BALANCING */
3506  
3507  /***
3508   * kick_process - kick a running thread to enter/exit the kernel
3509   * @p: the to-be-kicked thread
3510   *
3511   * Cause a process which is running on another CPU to enter
3512   * kernel-mode, without any delay. (to get signals handled.)
3513   *
3514   * NOTE: this function doesn't have to take the runqueue lock,
3515   * because all it wants to ensure is that the remote task enters
3516   * the kernel. If the IPI races and the task has been migrated
3517   * to another CPU then no harm is done and the purpose has been
3518   * achieved as well.
3519   */
kick_process(struct task_struct * p)3520  void kick_process(struct task_struct *p)
3521  {
3522  	int cpu;
3523  
3524  	preempt_disable();
3525  	cpu = task_cpu(p);
3526  	if ((cpu != smp_processor_id()) && task_curr(p))
3527  		smp_send_reschedule(cpu);
3528  	preempt_enable();
3529  }
3530  EXPORT_SYMBOL_GPL(kick_process);
3531  
3532  /*
3533   * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3534   *
3535   * A few notes on cpu_active vs cpu_online:
3536   *
3537   *  - cpu_active must be a subset of cpu_online
3538   *
3539   *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3540   *    see __set_cpus_allowed_ptr(). At this point the newly online
3541   *    CPU isn't yet part of the sched domains, and balancing will not
3542   *    see it.
3543   *
3544   *  - on CPU-down we clear cpu_active() to mask the sched domains and
3545   *    avoid the load balancer to place new tasks on the to be removed
3546   *    CPU. Existing tasks will remain running there and will be taken
3547   *    off.
3548   *
3549   * This means that fallback selection must not select !active CPUs.
3550   * And can assume that any active CPU must be online. Conversely
3551   * select_task_rq() below may allow selection of !active CPUs in order
3552   * to satisfy the above rules.
3553   */
select_fallback_rq(int cpu,struct task_struct * p)3554  static int select_fallback_rq(int cpu, struct task_struct *p)
3555  {
3556  	int nid = cpu_to_node(cpu);
3557  	const struct cpumask *nodemask = NULL;
3558  	enum { cpuset, possible, fail } state = cpuset;
3559  	int dest_cpu;
3560  
3561  	/*
3562  	 * If the node that the CPU is on has been offlined, cpu_to_node()
3563  	 * will return -1. There is no CPU on the node, and we should
3564  	 * select the CPU on the other node.
3565  	 */
3566  	if (nid != -1) {
3567  		nodemask = cpumask_of_node(nid);
3568  
3569  		/* Look for allowed, online CPU in same node. */
3570  		for_each_cpu(dest_cpu, nodemask) {
3571  			if (is_cpu_allowed(p, dest_cpu))
3572  				return dest_cpu;
3573  		}
3574  	}
3575  
3576  	for (;;) {
3577  		/* Any allowed, online CPU? */
3578  		for_each_cpu(dest_cpu, p->cpus_ptr) {
3579  			if (!is_cpu_allowed(p, dest_cpu))
3580  				continue;
3581  
3582  			goto out;
3583  		}
3584  
3585  		/* No more Mr. Nice Guy. */
3586  		switch (state) {
3587  		case cpuset:
3588  			if (cpuset_cpus_allowed_fallback(p)) {
3589  				state = possible;
3590  				break;
3591  			}
3592  			fallthrough;
3593  		case possible:
3594  			/*
3595  			 * XXX When called from select_task_rq() we only
3596  			 * hold p->pi_lock and again violate locking order.
3597  			 *
3598  			 * More yuck to audit.
3599  			 */
3600  			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3601  			state = fail;
3602  			break;
3603  		case fail:
3604  			BUG();
3605  			break;
3606  		}
3607  	}
3608  
3609  out:
3610  	if (state != cpuset) {
3611  		/*
3612  		 * Don't tell them about moving exiting tasks or
3613  		 * kernel threads (both mm NULL), since they never
3614  		 * leave kernel.
3615  		 */
3616  		if (p->mm && printk_ratelimit()) {
3617  			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3618  					task_pid_nr(p), p->comm, cpu);
3619  		}
3620  	}
3621  
3622  	return dest_cpu;
3623  }
3624  
3625  /*
3626   * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3627   */
3628  static inline
select_task_rq(struct task_struct * p,int cpu,int wake_flags)3629  int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3630  {
3631  	lockdep_assert_held(&p->pi_lock);
3632  
3633  	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3634  		cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3635  	else
3636  		cpu = cpumask_any(p->cpus_ptr);
3637  
3638  	/*
3639  	 * In order not to call set_task_cpu() on a blocking task we need
3640  	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3641  	 * CPU.
3642  	 *
3643  	 * Since this is common to all placement strategies, this lives here.
3644  	 *
3645  	 * [ this allows ->select_task() to simply return task_cpu(p) and
3646  	 *   not worry about this generic constraint ]
3647  	 */
3648  	if (unlikely(!is_cpu_allowed(p, cpu)))
3649  		cpu = select_fallback_rq(task_cpu(p), p);
3650  
3651  	return cpu;
3652  }
3653  
sched_set_stop_task(int cpu,struct task_struct * stop)3654  void sched_set_stop_task(int cpu, struct task_struct *stop)
3655  {
3656  	static struct lock_class_key stop_pi_lock;
3657  	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3658  	struct task_struct *old_stop = cpu_rq(cpu)->stop;
3659  
3660  	if (stop) {
3661  		/*
3662  		 * Make it appear like a SCHED_FIFO task, its something
3663  		 * userspace knows about and won't get confused about.
3664  		 *
3665  		 * Also, it will make PI more or less work without too
3666  		 * much confusion -- but then, stop work should not
3667  		 * rely on PI working anyway.
3668  		 */
3669  		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3670  
3671  		stop->sched_class = &stop_sched_class;
3672  
3673  		/*
3674  		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3675  		 * adjust the effective priority of a task. As a result,
3676  		 * rt_mutex_setprio() can trigger (RT) balancing operations,
3677  		 * which can then trigger wakeups of the stop thread to push
3678  		 * around the current task.
3679  		 *
3680  		 * The stop task itself will never be part of the PI-chain, it
3681  		 * never blocks, therefore that ->pi_lock recursion is safe.
3682  		 * Tell lockdep about this by placing the stop->pi_lock in its
3683  		 * own class.
3684  		 */
3685  		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3686  	}
3687  
3688  	cpu_rq(cpu)->stop = stop;
3689  
3690  	if (old_stop) {
3691  		/*
3692  		 * Reset it back to a normal scheduling class so that
3693  		 * it can die in pieces.
3694  		 */
3695  		old_stop->sched_class = &rt_sched_class;
3696  	}
3697  }
3698  
3699  #else /* CONFIG_SMP */
3700  
__set_cpus_allowed_ptr(struct task_struct * p,struct affinity_context * ctx)3701  static inline int __set_cpus_allowed_ptr(struct task_struct *p,
3702  					 struct affinity_context *ctx)
3703  {
3704  	return set_cpus_allowed_ptr(p, ctx->new_mask);
3705  }
3706  
migrate_disable_switch(struct rq * rq,struct task_struct * p)3707  static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3708  
rq_has_pinned_tasks(struct rq * rq)3709  static inline bool rq_has_pinned_tasks(struct rq *rq)
3710  {
3711  	return false;
3712  }
3713  
alloc_user_cpus_ptr(int node)3714  static inline cpumask_t *alloc_user_cpus_ptr(int node)
3715  {
3716  	return NULL;
3717  }
3718  
3719  #endif /* !CONFIG_SMP */
3720  
3721  static void
ttwu_stat(struct task_struct * p,int cpu,int wake_flags)3722  ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3723  {
3724  	struct rq *rq;
3725  
3726  	if (!schedstat_enabled())
3727  		return;
3728  
3729  	rq = this_rq();
3730  
3731  #ifdef CONFIG_SMP
3732  	if (cpu == rq->cpu) {
3733  		__schedstat_inc(rq->ttwu_local);
3734  		__schedstat_inc(p->stats.nr_wakeups_local);
3735  	} else {
3736  		struct sched_domain *sd;
3737  
3738  		__schedstat_inc(p->stats.nr_wakeups_remote);
3739  
3740  		guard(rcu)();
3741  		for_each_domain(rq->cpu, sd) {
3742  			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3743  				__schedstat_inc(sd->ttwu_wake_remote);
3744  				break;
3745  			}
3746  		}
3747  	}
3748  
3749  	if (wake_flags & WF_MIGRATED)
3750  		__schedstat_inc(p->stats.nr_wakeups_migrate);
3751  #endif /* CONFIG_SMP */
3752  
3753  	__schedstat_inc(rq->ttwu_count);
3754  	__schedstat_inc(p->stats.nr_wakeups);
3755  
3756  	if (wake_flags & WF_SYNC)
3757  		__schedstat_inc(p->stats.nr_wakeups_sync);
3758  }
3759  
3760  /*
3761   * Mark the task runnable.
3762   */
ttwu_do_wakeup(struct task_struct * p)3763  static inline void ttwu_do_wakeup(struct task_struct *p)
3764  {
3765  	WRITE_ONCE(p->__state, TASK_RUNNING);
3766  	trace_sched_wakeup(p);
3767  }
3768  
3769  static void
ttwu_do_activate(struct rq * rq,struct task_struct * p,int wake_flags,struct rq_flags * rf)3770  ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3771  		 struct rq_flags *rf)
3772  {
3773  	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3774  
3775  	lockdep_assert_rq_held(rq);
3776  
3777  	if (p->sched_contributes_to_load)
3778  		rq->nr_uninterruptible--;
3779  
3780  #ifdef CONFIG_SMP
3781  	if (wake_flags & WF_MIGRATED)
3782  		en_flags |= ENQUEUE_MIGRATED;
3783  	else
3784  #endif
3785  	if (p->in_iowait) {
3786  		delayacct_blkio_end(p);
3787  		atomic_dec(&task_rq(p)->nr_iowait);
3788  	}
3789  
3790  	activate_task(rq, p, en_flags);
3791  	wakeup_preempt(rq, p, wake_flags);
3792  
3793  	ttwu_do_wakeup(p);
3794  
3795  #ifdef CONFIG_SMP
3796  	if (p->sched_class->task_woken) {
3797  		/*
3798  		 * Our task @p is fully woken up and running; so it's safe to
3799  		 * drop the rq->lock, hereafter rq is only used for statistics.
3800  		 */
3801  		rq_unpin_lock(rq, rf);
3802  		p->sched_class->task_woken(rq, p);
3803  		rq_repin_lock(rq, rf);
3804  	}
3805  
3806  	if (rq->idle_stamp) {
3807  		u64 delta = rq_clock(rq) - rq->idle_stamp;
3808  		u64 max = 2*rq->max_idle_balance_cost;
3809  
3810  		update_avg(&rq->avg_idle, delta);
3811  
3812  		if (rq->avg_idle > max)
3813  			rq->avg_idle = max;
3814  
3815  		rq->wake_stamp = jiffies;
3816  		rq->wake_avg_idle = rq->avg_idle / 2;
3817  
3818  		rq->idle_stamp = 0;
3819  	}
3820  #endif
3821  }
3822  
3823  /*
3824   * Consider @p being inside a wait loop:
3825   *
3826   *   for (;;) {
3827   *      set_current_state(TASK_UNINTERRUPTIBLE);
3828   *
3829   *      if (CONDITION)
3830   *         break;
3831   *
3832   *      schedule();
3833   *   }
3834   *   __set_current_state(TASK_RUNNING);
3835   *
3836   * between set_current_state() and schedule(). In this case @p is still
3837   * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3838   * an atomic manner.
3839   *
3840   * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3841   * then schedule() must still happen and p->state can be changed to
3842   * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3843   * need to do a full wakeup with enqueue.
3844   *
3845   * Returns: %true when the wakeup is done,
3846   *          %false otherwise.
3847   */
ttwu_runnable(struct task_struct * p,int wake_flags)3848  static int ttwu_runnable(struct task_struct *p, int wake_flags)
3849  {
3850  	struct rq_flags rf;
3851  	struct rq *rq;
3852  	int ret = 0;
3853  
3854  	rq = __task_rq_lock(p, &rf);
3855  	if (task_on_rq_queued(p)) {
3856  		if (!task_on_cpu(rq, p)) {
3857  			/*
3858  			 * When on_rq && !on_cpu the task is preempted, see if
3859  			 * it should preempt the task that is current now.
3860  			 */
3861  			update_rq_clock(rq);
3862  			wakeup_preempt(rq, p, wake_flags);
3863  		}
3864  		ttwu_do_wakeup(p);
3865  		ret = 1;
3866  	}
3867  	__task_rq_unlock(rq, &rf);
3868  
3869  	return ret;
3870  }
3871  
3872  #ifdef CONFIG_SMP
sched_ttwu_pending(void * arg)3873  void sched_ttwu_pending(void *arg)
3874  {
3875  	struct llist_node *llist = arg;
3876  	struct rq *rq = this_rq();
3877  	struct task_struct *p, *t;
3878  	struct rq_flags rf;
3879  
3880  	if (!llist)
3881  		return;
3882  
3883  	rq_lock_irqsave(rq, &rf);
3884  	update_rq_clock(rq);
3885  
3886  	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3887  		if (WARN_ON_ONCE(p->on_cpu))
3888  			smp_cond_load_acquire(&p->on_cpu, !VAL);
3889  
3890  		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3891  			set_task_cpu(p, cpu_of(rq));
3892  
3893  		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3894  	}
3895  
3896  	/*
3897  	 * Must be after enqueueing at least once task such that
3898  	 * idle_cpu() does not observe a false-negative -- if it does,
3899  	 * it is possible for select_idle_siblings() to stack a number
3900  	 * of tasks on this CPU during that window.
3901  	 *
3902  	 * It is ok to clear ttwu_pending when another task pending.
3903  	 * We will receive IPI after local irq enabled and then enqueue it.
3904  	 * Since now nr_running > 0, idle_cpu() will always get correct result.
3905  	 */
3906  	WRITE_ONCE(rq->ttwu_pending, 0);
3907  	rq_unlock_irqrestore(rq, &rf);
3908  }
3909  
3910  /*
3911   * Prepare the scene for sending an IPI for a remote smp_call
3912   *
3913   * Returns true if the caller can proceed with sending the IPI.
3914   * Returns false otherwise.
3915   */
call_function_single_prep_ipi(int cpu)3916  bool call_function_single_prep_ipi(int cpu)
3917  {
3918  	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3919  		trace_sched_wake_idle_without_ipi(cpu);
3920  		return false;
3921  	}
3922  
3923  	return true;
3924  }
3925  
3926  /*
3927   * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3928   * necessary. The wakee CPU on receipt of the IPI will queue the task
3929   * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3930   * of the wakeup instead of the waker.
3931   */
__ttwu_queue_wakelist(struct task_struct * p,int cpu,int wake_flags)3932  static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3933  {
3934  	struct rq *rq = cpu_rq(cpu);
3935  
3936  	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3937  
3938  	WRITE_ONCE(rq->ttwu_pending, 1);
3939  	__smp_call_single_queue(cpu, &p->wake_entry.llist);
3940  }
3941  
wake_up_if_idle(int cpu)3942  void wake_up_if_idle(int cpu)
3943  {
3944  	struct rq *rq = cpu_rq(cpu);
3945  
3946  	guard(rcu)();
3947  	if (is_idle_task(rcu_dereference(rq->curr))) {
3948  		guard(rq_lock_irqsave)(rq);
3949  		if (is_idle_task(rq->curr))
3950  			resched_curr(rq);
3951  	}
3952  }
3953  
cpus_share_cache(int this_cpu,int that_cpu)3954  bool cpus_share_cache(int this_cpu, int that_cpu)
3955  {
3956  	if (this_cpu == that_cpu)
3957  		return true;
3958  
3959  	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3960  }
3961  
ttwu_queue_cond(struct task_struct * p,int cpu)3962  static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3963  {
3964  	/*
3965  	 * Do not complicate things with the async wake_list while the CPU is
3966  	 * in hotplug state.
3967  	 */
3968  	if (!cpu_active(cpu))
3969  		return false;
3970  
3971  	/* Ensure the task will still be allowed to run on the CPU. */
3972  	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3973  		return false;
3974  
3975  	/*
3976  	 * If the CPU does not share cache, then queue the task on the
3977  	 * remote rqs wakelist to avoid accessing remote data.
3978  	 */
3979  	if (!cpus_share_cache(smp_processor_id(), cpu))
3980  		return true;
3981  
3982  	if (cpu == smp_processor_id())
3983  		return false;
3984  
3985  	/*
3986  	 * If the wakee cpu is idle, or the task is descheduling and the
3987  	 * only running task on the CPU, then use the wakelist to offload
3988  	 * the task activation to the idle (or soon-to-be-idle) CPU as
3989  	 * the current CPU is likely busy. nr_running is checked to
3990  	 * avoid unnecessary task stacking.
3991  	 *
3992  	 * Note that we can only get here with (wakee) p->on_rq=0,
3993  	 * p->on_cpu can be whatever, we've done the dequeue, so
3994  	 * the wakee has been accounted out of ->nr_running.
3995  	 */
3996  	if (!cpu_rq(cpu)->nr_running)
3997  		return true;
3998  
3999  	return false;
4000  }
4001  
ttwu_queue_wakelist(struct task_struct * p,int cpu,int wake_flags)4002  static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
4003  {
4004  	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
4005  		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
4006  		__ttwu_queue_wakelist(p, cpu, wake_flags);
4007  		return true;
4008  	}
4009  
4010  	return false;
4011  }
4012  
4013  #else /* !CONFIG_SMP */
4014  
ttwu_queue_wakelist(struct task_struct * p,int cpu,int wake_flags)4015  static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
4016  {
4017  	return false;
4018  }
4019  
4020  #endif /* CONFIG_SMP */
4021  
ttwu_queue(struct task_struct * p,int cpu,int wake_flags)4022  static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
4023  {
4024  	struct rq *rq = cpu_rq(cpu);
4025  	struct rq_flags rf;
4026  
4027  	if (ttwu_queue_wakelist(p, cpu, wake_flags))
4028  		return;
4029  
4030  	rq_lock(rq, &rf);
4031  	update_rq_clock(rq);
4032  	ttwu_do_activate(rq, p, wake_flags, &rf);
4033  	rq_unlock(rq, &rf);
4034  }
4035  
4036  /*
4037   * Invoked from try_to_wake_up() to check whether the task can be woken up.
4038   *
4039   * The caller holds p::pi_lock if p != current or has preemption
4040   * disabled when p == current.
4041   *
4042   * The rules of PREEMPT_RT saved_state:
4043   *
4044   *   The related locking code always holds p::pi_lock when updating
4045   *   p::saved_state, which means the code is fully serialized in both cases.
4046   *
4047   *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
4048   *   bits set. This allows to distinguish all wakeup scenarios.
4049   */
4050  static __always_inline
ttwu_state_match(struct task_struct * p,unsigned int state,int * success)4051  bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4052  {
4053  	int match;
4054  
4055  	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4056  		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4057  			     state != TASK_RTLOCK_WAIT);
4058  	}
4059  
4060  	*success = !!(match = __task_state_match(p, state));
4061  
4062  #ifdef CONFIG_PREEMPT_RT
4063  	/*
4064  	 * Saved state preserves the task state across blocking on
4065  	 * an RT lock.  If the state matches, set p::saved_state to
4066  	 * TASK_RUNNING, but do not wake the task because it waits
4067  	 * for a lock wakeup. Also indicate success because from
4068  	 * the regular waker's point of view this has succeeded.
4069  	 *
4070  	 * After acquiring the lock the task will restore p::__state
4071  	 * from p::saved_state which ensures that the regular
4072  	 * wakeup is not lost. The restore will also set
4073  	 * p::saved_state to TASK_RUNNING so any further tests will
4074  	 * not result in false positives vs. @success
4075  	 */
4076  	if (match < 0)
4077  		p->saved_state = TASK_RUNNING;
4078  #endif
4079  	return match > 0;
4080  }
4081  
4082  /*
4083   * Notes on Program-Order guarantees on SMP systems.
4084   *
4085   *  MIGRATION
4086   *
4087   * The basic program-order guarantee on SMP systems is that when a task [t]
4088   * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4089   * execution on its new CPU [c1].
4090   *
4091   * For migration (of runnable tasks) this is provided by the following means:
4092   *
4093   *  A) UNLOCK of the rq(c0)->lock scheduling out task t
4094   *  B) migration for t is required to synchronize *both* rq(c0)->lock and
4095   *     rq(c1)->lock (if not at the same time, then in that order).
4096   *  C) LOCK of the rq(c1)->lock scheduling in task
4097   *
4098   * Release/acquire chaining guarantees that B happens after A and C after B.
4099   * Note: the CPU doing B need not be c0 or c1
4100   *
4101   * Example:
4102   *
4103   *   CPU0            CPU1            CPU2
4104   *
4105   *   LOCK rq(0)->lock
4106   *   sched-out X
4107   *   sched-in Y
4108   *   UNLOCK rq(0)->lock
4109   *
4110   *                                   LOCK rq(0)->lock // orders against CPU0
4111   *                                   dequeue X
4112   *                                   UNLOCK rq(0)->lock
4113   *
4114   *                                   LOCK rq(1)->lock
4115   *                                   enqueue X
4116   *                                   UNLOCK rq(1)->lock
4117   *
4118   *                   LOCK rq(1)->lock // orders against CPU2
4119   *                   sched-out Z
4120   *                   sched-in X
4121   *                   UNLOCK rq(1)->lock
4122   *
4123   *
4124   *  BLOCKING -- aka. SLEEP + WAKEUP
4125   *
4126   * For blocking we (obviously) need to provide the same guarantee as for
4127   * migration. However the means are completely different as there is no lock
4128   * chain to provide order. Instead we do:
4129   *
4130   *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
4131   *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4132   *
4133   * Example:
4134   *
4135   *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
4136   *
4137   *   LOCK rq(0)->lock LOCK X->pi_lock
4138   *   dequeue X
4139   *   sched-out X
4140   *   smp_store_release(X->on_cpu, 0);
4141   *
4142   *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
4143   *                    X->state = WAKING
4144   *                    set_task_cpu(X,2)
4145   *
4146   *                    LOCK rq(2)->lock
4147   *                    enqueue X
4148   *                    X->state = RUNNING
4149   *                    UNLOCK rq(2)->lock
4150   *
4151   *                                          LOCK rq(2)->lock // orders against CPU1
4152   *                                          sched-out Z
4153   *                                          sched-in X
4154   *                                          UNLOCK rq(2)->lock
4155   *
4156   *                    UNLOCK X->pi_lock
4157   *   UNLOCK rq(0)->lock
4158   *
4159   *
4160   * However, for wakeups there is a second guarantee we must provide, namely we
4161   * must ensure that CONDITION=1 done by the caller can not be reordered with
4162   * accesses to the task state; see try_to_wake_up() and set_current_state().
4163   */
4164  
4165  /**
4166   * try_to_wake_up - wake up a thread
4167   * @p: the thread to be awakened
4168   * @state: the mask of task states that can be woken
4169   * @wake_flags: wake modifier flags (WF_*)
4170   *
4171   * Conceptually does:
4172   *
4173   *   If (@state & @p->state) @p->state = TASK_RUNNING.
4174   *
4175   * If the task was not queued/runnable, also place it back on a runqueue.
4176   *
4177   * This function is atomic against schedule() which would dequeue the task.
4178   *
4179   * It issues a full memory barrier before accessing @p->state, see the comment
4180   * with set_current_state().
4181   *
4182   * Uses p->pi_lock to serialize against concurrent wake-ups.
4183   *
4184   * Relies on p->pi_lock stabilizing:
4185   *  - p->sched_class
4186   *  - p->cpus_ptr
4187   *  - p->sched_task_group
4188   * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4189   *
4190   * Tries really hard to only take one task_rq(p)->lock for performance.
4191   * Takes rq->lock in:
4192   *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
4193   *  - ttwu_queue()       -- new rq, for enqueue of the task;
4194   *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4195   *
4196   * As a consequence we race really badly with just about everything. See the
4197   * many memory barriers and their comments for details.
4198   *
4199   * Return: %true if @p->state changes (an actual wakeup was done),
4200   *	   %false otherwise.
4201   */
try_to_wake_up(struct task_struct * p,unsigned int state,int wake_flags)4202  int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4203  {
4204  	guard(preempt)();
4205  	int cpu, success = 0;
4206  
4207  	if (p == current) {
4208  		/*
4209  		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4210  		 * == smp_processor_id()'. Together this means we can special
4211  		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4212  		 * without taking any locks.
4213  		 *
4214  		 * In particular:
4215  		 *  - we rely on Program-Order guarantees for all the ordering,
4216  		 *  - we're serialized against set_special_state() by virtue of
4217  		 *    it disabling IRQs (this allows not taking ->pi_lock).
4218  		 */
4219  		if (!ttwu_state_match(p, state, &success))
4220  			goto out;
4221  
4222  		trace_sched_waking(p);
4223  		ttwu_do_wakeup(p);
4224  		goto out;
4225  	}
4226  
4227  	/*
4228  	 * If we are going to wake up a thread waiting for CONDITION we
4229  	 * need to ensure that CONDITION=1 done by the caller can not be
4230  	 * reordered with p->state check below. This pairs with smp_store_mb()
4231  	 * in set_current_state() that the waiting thread does.
4232  	 */
4233  	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4234  		smp_mb__after_spinlock();
4235  		if (!ttwu_state_match(p, state, &success))
4236  			break;
4237  
4238  		trace_sched_waking(p);
4239  
4240  		/*
4241  		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4242  		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4243  		 * in smp_cond_load_acquire() below.
4244  		 *
4245  		 * sched_ttwu_pending()			try_to_wake_up()
4246  		 *   STORE p->on_rq = 1			  LOAD p->state
4247  		 *   UNLOCK rq->lock
4248  		 *
4249  		 * __schedule() (switch to task 'p')
4250  		 *   LOCK rq->lock			  smp_rmb();
4251  		 *   smp_mb__after_spinlock();
4252  		 *   UNLOCK rq->lock
4253  		 *
4254  		 * [task p]
4255  		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4256  		 *
4257  		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4258  		 * __schedule().  See the comment for smp_mb__after_spinlock().
4259  		 *
4260  		 * A similar smb_rmb() lives in try_invoke_on_locked_down_task().
4261  		 */
4262  		smp_rmb();
4263  		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4264  			break;
4265  
4266  #ifdef CONFIG_SMP
4267  		/*
4268  		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4269  		 * possible to, falsely, observe p->on_cpu == 0.
4270  		 *
4271  		 * One must be running (->on_cpu == 1) in order to remove oneself
4272  		 * from the runqueue.
4273  		 *
4274  		 * __schedule() (switch to task 'p')	try_to_wake_up()
4275  		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4276  		 *   UNLOCK rq->lock
4277  		 *
4278  		 * __schedule() (put 'p' to sleep)
4279  		 *   LOCK rq->lock			  smp_rmb();
4280  		 *   smp_mb__after_spinlock();
4281  		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4282  		 *
4283  		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4284  		 * __schedule().  See the comment for smp_mb__after_spinlock().
4285  		 *
4286  		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4287  		 * schedule()'s deactivate_task() has 'happened' and p will no longer
4288  		 * care about it's own p->state. See the comment in __schedule().
4289  		 */
4290  		smp_acquire__after_ctrl_dep();
4291  
4292  		/*
4293  		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4294  		 * == 0), which means we need to do an enqueue, change p->state to
4295  		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4296  		 * enqueue, such as ttwu_queue_wakelist().
4297  		 */
4298  		WRITE_ONCE(p->__state, TASK_WAKING);
4299  
4300  		/*
4301  		 * If the owning (remote) CPU is still in the middle of schedule() with
4302  		 * this task as prev, considering queueing p on the remote CPUs wake_list
4303  		 * which potentially sends an IPI instead of spinning on p->on_cpu to
4304  		 * let the waker make forward progress. This is safe because IRQs are
4305  		 * disabled and the IPI will deliver after on_cpu is cleared.
4306  		 *
4307  		 * Ensure we load task_cpu(p) after p->on_cpu:
4308  		 *
4309  		 * set_task_cpu(p, cpu);
4310  		 *   STORE p->cpu = @cpu
4311  		 * __schedule() (switch to task 'p')
4312  		 *   LOCK rq->lock
4313  		 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
4314  		 *   STORE p->on_cpu = 1		LOAD p->cpu
4315  		 *
4316  		 * to ensure we observe the correct CPU on which the task is currently
4317  		 * scheduling.
4318  		 */
4319  		if (smp_load_acquire(&p->on_cpu) &&
4320  		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4321  			break;
4322  
4323  		/*
4324  		 * If the owning (remote) CPU is still in the middle of schedule() with
4325  		 * this task as prev, wait until it's done referencing the task.
4326  		 *
4327  		 * Pairs with the smp_store_release() in finish_task().
4328  		 *
4329  		 * This ensures that tasks getting woken will be fully ordered against
4330  		 * their previous state and preserve Program Order.
4331  		 */
4332  		smp_cond_load_acquire(&p->on_cpu, !VAL);
4333  
4334  		cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4335  		if (task_cpu(p) != cpu) {
4336  			if (p->in_iowait) {
4337  				delayacct_blkio_end(p);
4338  				atomic_dec(&task_rq(p)->nr_iowait);
4339  			}
4340  
4341  			wake_flags |= WF_MIGRATED;
4342  			psi_ttwu_dequeue(p);
4343  			set_task_cpu(p, cpu);
4344  		}
4345  #else
4346  		cpu = task_cpu(p);
4347  #endif /* CONFIG_SMP */
4348  
4349  		ttwu_queue(p, cpu, wake_flags);
4350  	}
4351  out:
4352  	if (success)
4353  		ttwu_stat(p, task_cpu(p), wake_flags);
4354  
4355  	return success;
4356  }
4357  
__task_needs_rq_lock(struct task_struct * p)4358  static bool __task_needs_rq_lock(struct task_struct *p)
4359  {
4360  	unsigned int state = READ_ONCE(p->__state);
4361  
4362  	/*
4363  	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4364  	 * the task is blocked. Make sure to check @state since ttwu() can drop
4365  	 * locks at the end, see ttwu_queue_wakelist().
4366  	 */
4367  	if (state == TASK_RUNNING || state == TASK_WAKING)
4368  		return true;
4369  
4370  	/*
4371  	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4372  	 * possible to, falsely, observe p->on_rq == 0.
4373  	 *
4374  	 * See try_to_wake_up() for a longer comment.
4375  	 */
4376  	smp_rmb();
4377  	if (p->on_rq)
4378  		return true;
4379  
4380  #ifdef CONFIG_SMP
4381  	/*
4382  	 * Ensure the task has finished __schedule() and will not be referenced
4383  	 * anymore. Again, see try_to_wake_up() for a longer comment.
4384  	 */
4385  	smp_rmb();
4386  	smp_cond_load_acquire(&p->on_cpu, !VAL);
4387  #endif
4388  
4389  	return false;
4390  }
4391  
4392  /**
4393   * task_call_func - Invoke a function on task in fixed state
4394   * @p: Process for which the function is to be invoked, can be @current.
4395   * @func: Function to invoke.
4396   * @arg: Argument to function.
4397   *
4398   * Fix the task in it's current state by avoiding wakeups and or rq operations
4399   * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
4400   * to work out what the state is, if required.  Given that @func can be invoked
4401   * with a runqueue lock held, it had better be quite lightweight.
4402   *
4403   * Returns:
4404   *   Whatever @func returns
4405   */
task_call_func(struct task_struct * p,task_call_f func,void * arg)4406  int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4407  {
4408  	struct rq *rq = NULL;
4409  	struct rq_flags rf;
4410  	int ret;
4411  
4412  	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4413  
4414  	if (__task_needs_rq_lock(p))
4415  		rq = __task_rq_lock(p, &rf);
4416  
4417  	/*
4418  	 * At this point the task is pinned; either:
4419  	 *  - blocked and we're holding off wakeups	 (pi->lock)
4420  	 *  - woken, and we're holding off enqueue	 (rq->lock)
4421  	 *  - queued, and we're holding off schedule	 (rq->lock)
4422  	 *  - running, and we're holding off de-schedule (rq->lock)
4423  	 *
4424  	 * The called function (@func) can use: task_curr(), p->on_rq and
4425  	 * p->__state to differentiate between these states.
4426  	 */
4427  	ret = func(p, arg);
4428  
4429  	if (rq)
4430  		rq_unlock(rq, &rf);
4431  
4432  	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4433  	return ret;
4434  }
4435  
4436  /**
4437   * cpu_curr_snapshot - Return a snapshot of the currently running task
4438   * @cpu: The CPU on which to snapshot the task.
4439   *
4440   * Returns the task_struct pointer of the task "currently" running on
4441   * the specified CPU.
4442   *
4443   * If the specified CPU was offline, the return value is whatever it
4444   * is, perhaps a pointer to the task_struct structure of that CPU's idle
4445   * task, but there is no guarantee.  Callers wishing a useful return
4446   * value must take some action to ensure that the specified CPU remains
4447   * online throughout.
4448   *
4449   * This function executes full memory barriers before and after fetching
4450   * the pointer, which permits the caller to confine this function's fetch
4451   * with respect to the caller's accesses to other shared variables.
4452   */
cpu_curr_snapshot(int cpu)4453  struct task_struct *cpu_curr_snapshot(int cpu)
4454  {
4455  	struct rq *rq = cpu_rq(cpu);
4456  	struct task_struct *t;
4457  	struct rq_flags rf;
4458  
4459  	rq_lock_irqsave(rq, &rf);
4460  	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4461  	t = rcu_dereference(cpu_curr(cpu));
4462  	rq_unlock_irqrestore(rq, &rf);
4463  	smp_mb(); /* Pairing determined by caller's synchronization design. */
4464  
4465  	return t;
4466  }
4467  
4468  /**
4469   * wake_up_process - Wake up a specific process
4470   * @p: The process to be woken up.
4471   *
4472   * Attempt to wake up the nominated process and move it to the set of runnable
4473   * processes.
4474   *
4475   * Return: 1 if the process was woken up, 0 if it was already running.
4476   *
4477   * This function executes a full memory barrier before accessing the task state.
4478   */
wake_up_process(struct task_struct * p)4479  int wake_up_process(struct task_struct *p)
4480  {
4481  	return try_to_wake_up(p, TASK_NORMAL, 0);
4482  }
4483  EXPORT_SYMBOL(wake_up_process);
4484  
wake_up_state(struct task_struct * p,unsigned int state)4485  int wake_up_state(struct task_struct *p, unsigned int state)
4486  {
4487  	return try_to_wake_up(p, state, 0);
4488  }
4489  
4490  /*
4491   * Perform scheduler related setup for a newly forked process p.
4492   * p is forked by current.
4493   *
4494   * __sched_fork() is basic setup which is also used by sched_init() to
4495   * initialize the boot CPU's idle task.
4496   */
__sched_fork(unsigned long clone_flags,struct task_struct * p)4497  static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4498  {
4499  	p->on_rq			= 0;
4500  
4501  	p->se.on_rq			= 0;
4502  	p->se.exec_start		= 0;
4503  	p->se.sum_exec_runtime		= 0;
4504  	p->se.prev_sum_exec_runtime	= 0;
4505  	p->se.nr_migrations		= 0;
4506  	p->se.vruntime			= 0;
4507  	p->se.vlag			= 0;
4508  	p->se.slice			= sysctl_sched_base_slice;
4509  	INIT_LIST_HEAD(&p->se.group_node);
4510  
4511  #ifdef CONFIG_FAIR_GROUP_SCHED
4512  	p->se.cfs_rq			= NULL;
4513  #endif
4514  
4515  #ifdef CONFIG_SCHEDSTATS
4516  	/* Even if schedstat is disabled, there should not be garbage */
4517  	memset(&p->stats, 0, sizeof(p->stats));
4518  #endif
4519  
4520  	init_dl_entity(&p->dl);
4521  
4522  	INIT_LIST_HEAD(&p->rt.run_list);
4523  	p->rt.timeout		= 0;
4524  	p->rt.time_slice	= sched_rr_timeslice;
4525  	p->rt.on_rq		= 0;
4526  	p->rt.on_list		= 0;
4527  
4528  #ifdef CONFIG_PREEMPT_NOTIFIERS
4529  	INIT_HLIST_HEAD(&p->preempt_notifiers);
4530  #endif
4531  
4532  #ifdef CONFIG_COMPACTION
4533  	p->capture_control = NULL;
4534  #endif
4535  	init_numa_balancing(clone_flags, p);
4536  #ifdef CONFIG_SMP
4537  	p->wake_entry.u_flags = CSD_TYPE_TTWU;
4538  	p->migration_pending = NULL;
4539  #endif
4540  	init_sched_mm_cid(p);
4541  }
4542  
4543  DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4544  
4545  #ifdef CONFIG_NUMA_BALANCING
4546  
4547  int sysctl_numa_balancing_mode;
4548  
__set_numabalancing_state(bool enabled)4549  static void __set_numabalancing_state(bool enabled)
4550  {
4551  	if (enabled)
4552  		static_branch_enable(&sched_numa_balancing);
4553  	else
4554  		static_branch_disable(&sched_numa_balancing);
4555  }
4556  
set_numabalancing_state(bool enabled)4557  void set_numabalancing_state(bool enabled)
4558  {
4559  	if (enabled)
4560  		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4561  	else
4562  		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4563  	__set_numabalancing_state(enabled);
4564  }
4565  
4566  #ifdef CONFIG_PROC_SYSCTL
reset_memory_tiering(void)4567  static void reset_memory_tiering(void)
4568  {
4569  	struct pglist_data *pgdat;
4570  
4571  	for_each_online_pgdat(pgdat) {
4572  		pgdat->nbp_threshold = 0;
4573  		pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4574  		pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4575  	}
4576  }
4577  
sysctl_numa_balancing(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)4578  static int sysctl_numa_balancing(struct ctl_table *table, int write,
4579  			  void *buffer, size_t *lenp, loff_t *ppos)
4580  {
4581  	struct ctl_table t;
4582  	int err;
4583  	int state = sysctl_numa_balancing_mode;
4584  
4585  	if (write && !capable(CAP_SYS_ADMIN))
4586  		return -EPERM;
4587  
4588  	t = *table;
4589  	t.data = &state;
4590  	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4591  	if (err < 0)
4592  		return err;
4593  	if (write) {
4594  		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4595  		    (state & NUMA_BALANCING_MEMORY_TIERING))
4596  			reset_memory_tiering();
4597  		sysctl_numa_balancing_mode = state;
4598  		__set_numabalancing_state(state);
4599  	}
4600  	return err;
4601  }
4602  #endif
4603  #endif
4604  
4605  #ifdef CONFIG_SCHEDSTATS
4606  
4607  DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4608  
set_schedstats(bool enabled)4609  static void set_schedstats(bool enabled)
4610  {
4611  	if (enabled)
4612  		static_branch_enable(&sched_schedstats);
4613  	else
4614  		static_branch_disable(&sched_schedstats);
4615  }
4616  
force_schedstat_enabled(void)4617  void force_schedstat_enabled(void)
4618  {
4619  	if (!schedstat_enabled()) {
4620  		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4621  		static_branch_enable(&sched_schedstats);
4622  	}
4623  }
4624  
setup_schedstats(char * str)4625  static int __init setup_schedstats(char *str)
4626  {
4627  	int ret = 0;
4628  	if (!str)
4629  		goto out;
4630  
4631  	if (!strcmp(str, "enable")) {
4632  		set_schedstats(true);
4633  		ret = 1;
4634  	} else if (!strcmp(str, "disable")) {
4635  		set_schedstats(false);
4636  		ret = 1;
4637  	}
4638  out:
4639  	if (!ret)
4640  		pr_warn("Unable to parse schedstats=\n");
4641  
4642  	return ret;
4643  }
4644  __setup("schedstats=", setup_schedstats);
4645  
4646  #ifdef CONFIG_PROC_SYSCTL
sysctl_schedstats(struct ctl_table * table,int write,void * buffer,size_t * lenp,loff_t * ppos)4647  static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
4648  		size_t *lenp, loff_t *ppos)
4649  {
4650  	struct ctl_table t;
4651  	int err;
4652  	int state = static_branch_likely(&sched_schedstats);
4653  
4654  	if (write && !capable(CAP_SYS_ADMIN))
4655  		return -EPERM;
4656  
4657  	t = *table;
4658  	t.data = &state;
4659  	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4660  	if (err < 0)
4661  		return err;
4662  	if (write)
4663  		set_schedstats(state);
4664  	return err;
4665  }
4666  #endif /* CONFIG_PROC_SYSCTL */
4667  #endif /* CONFIG_SCHEDSTATS */
4668  
4669  #ifdef CONFIG_SYSCTL
4670  static struct ctl_table sched_core_sysctls[] = {
4671  #ifdef CONFIG_SCHEDSTATS
4672  	{
4673  		.procname       = "sched_schedstats",
4674  		.data           = NULL,
4675  		.maxlen         = sizeof(unsigned int),
4676  		.mode           = 0644,
4677  		.proc_handler   = sysctl_schedstats,
4678  		.extra1         = SYSCTL_ZERO,
4679  		.extra2         = SYSCTL_ONE,
4680  	},
4681  #endif /* CONFIG_SCHEDSTATS */
4682  #ifdef CONFIG_UCLAMP_TASK
4683  	{
4684  		.procname       = "sched_util_clamp_min",
4685  		.data           = &sysctl_sched_uclamp_util_min,
4686  		.maxlen         = sizeof(unsigned int),
4687  		.mode           = 0644,
4688  		.proc_handler   = sysctl_sched_uclamp_handler,
4689  	},
4690  	{
4691  		.procname       = "sched_util_clamp_max",
4692  		.data           = &sysctl_sched_uclamp_util_max,
4693  		.maxlen         = sizeof(unsigned int),
4694  		.mode           = 0644,
4695  		.proc_handler   = sysctl_sched_uclamp_handler,
4696  	},
4697  	{
4698  		.procname       = "sched_util_clamp_min_rt_default",
4699  		.data           = &sysctl_sched_uclamp_util_min_rt_default,
4700  		.maxlen         = sizeof(unsigned int),
4701  		.mode           = 0644,
4702  		.proc_handler   = sysctl_sched_uclamp_handler,
4703  	},
4704  #endif /* CONFIG_UCLAMP_TASK */
4705  #ifdef CONFIG_NUMA_BALANCING
4706  	{
4707  		.procname	= "numa_balancing",
4708  		.data		= NULL, /* filled in by handler */
4709  		.maxlen		= sizeof(unsigned int),
4710  		.mode		= 0644,
4711  		.proc_handler	= sysctl_numa_balancing,
4712  		.extra1		= SYSCTL_ZERO,
4713  		.extra2		= SYSCTL_FOUR,
4714  	},
4715  #endif /* CONFIG_NUMA_BALANCING */
4716  	{}
4717  };
sched_core_sysctl_init(void)4718  static int __init sched_core_sysctl_init(void)
4719  {
4720  	register_sysctl_init("kernel", sched_core_sysctls);
4721  	return 0;
4722  }
4723  late_initcall(sched_core_sysctl_init);
4724  #endif /* CONFIG_SYSCTL */
4725  
4726  /*
4727   * fork()/clone()-time setup:
4728   */
sched_fork(unsigned long clone_flags,struct task_struct * p)4729  int sched_fork(unsigned long clone_flags, struct task_struct *p)
4730  {
4731  	__sched_fork(clone_flags, p);
4732  	/*
4733  	 * We mark the process as NEW here. This guarantees that
4734  	 * nobody will actually run it, and a signal or other external
4735  	 * event cannot wake it up and insert it on the runqueue either.
4736  	 */
4737  	p->__state = TASK_NEW;
4738  
4739  	/*
4740  	 * Make sure we do not leak PI boosting priority to the child.
4741  	 */
4742  	p->prio = current->normal_prio;
4743  
4744  	uclamp_fork(p);
4745  
4746  	/*
4747  	 * Revert to default priority/policy on fork if requested.
4748  	 */
4749  	if (unlikely(p->sched_reset_on_fork)) {
4750  		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4751  			p->policy = SCHED_NORMAL;
4752  			p->static_prio = NICE_TO_PRIO(0);
4753  			p->rt_priority = 0;
4754  		} else if (PRIO_TO_NICE(p->static_prio) < 0)
4755  			p->static_prio = NICE_TO_PRIO(0);
4756  
4757  		p->prio = p->normal_prio = p->static_prio;
4758  		set_load_weight(p, false);
4759  
4760  		/*
4761  		 * We don't need the reset flag anymore after the fork. It has
4762  		 * fulfilled its duty:
4763  		 */
4764  		p->sched_reset_on_fork = 0;
4765  	}
4766  
4767  	if (dl_prio(p->prio))
4768  		return -EAGAIN;
4769  	else if (rt_prio(p->prio))
4770  		p->sched_class = &rt_sched_class;
4771  	else
4772  		p->sched_class = &fair_sched_class;
4773  
4774  	init_entity_runnable_average(&p->se);
4775  
4776  
4777  #ifdef CONFIG_SCHED_INFO
4778  	if (likely(sched_info_on()))
4779  		memset(&p->sched_info, 0, sizeof(p->sched_info));
4780  #endif
4781  #if defined(CONFIG_SMP)
4782  	p->on_cpu = 0;
4783  #endif
4784  	init_task_preempt_count(p);
4785  #ifdef CONFIG_SMP
4786  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
4787  	RB_CLEAR_NODE(&p->pushable_dl_tasks);
4788  #endif
4789  	return 0;
4790  }
4791  
sched_cgroup_fork(struct task_struct * p,struct kernel_clone_args * kargs)4792  void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4793  {
4794  	unsigned long flags;
4795  
4796  	/*
4797  	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4798  	 * required yet, but lockdep gets upset if rules are violated.
4799  	 */
4800  	raw_spin_lock_irqsave(&p->pi_lock, flags);
4801  #ifdef CONFIG_CGROUP_SCHED
4802  	if (1) {
4803  		struct task_group *tg;
4804  		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4805  				  struct task_group, css);
4806  		tg = autogroup_task_group(p, tg);
4807  		p->sched_task_group = tg;
4808  	}
4809  #endif
4810  	rseq_migrate(p);
4811  	/*
4812  	 * We're setting the CPU for the first time, we don't migrate,
4813  	 * so use __set_task_cpu().
4814  	 */
4815  	__set_task_cpu(p, smp_processor_id());
4816  	if (p->sched_class->task_fork)
4817  		p->sched_class->task_fork(p);
4818  	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4819  }
4820  
sched_post_fork(struct task_struct * p)4821  void sched_post_fork(struct task_struct *p)
4822  {
4823  	uclamp_post_fork(p);
4824  }
4825  
to_ratio(u64 period,u64 runtime)4826  unsigned long to_ratio(u64 period, u64 runtime)
4827  {
4828  	if (runtime == RUNTIME_INF)
4829  		return BW_UNIT;
4830  
4831  	/*
4832  	 * Doing this here saves a lot of checks in all
4833  	 * the calling paths, and returning zero seems
4834  	 * safe for them anyway.
4835  	 */
4836  	if (period == 0)
4837  		return 0;
4838  
4839  	return div64_u64(runtime << BW_SHIFT, period);
4840  }
4841  
4842  /*
4843   * wake_up_new_task - wake up a newly created task for the first time.
4844   *
4845   * This function will do some initial scheduler statistics housekeeping
4846   * that must be done for every newly created context, then puts the task
4847   * on the runqueue and wakes it.
4848   */
wake_up_new_task(struct task_struct * p)4849  void wake_up_new_task(struct task_struct *p)
4850  {
4851  	struct rq_flags rf;
4852  	struct rq *rq;
4853  
4854  	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4855  	WRITE_ONCE(p->__state, TASK_RUNNING);
4856  #ifdef CONFIG_SMP
4857  	/*
4858  	 * Fork balancing, do it here and not earlier because:
4859  	 *  - cpus_ptr can change in the fork path
4860  	 *  - any previously selected CPU might disappear through hotplug
4861  	 *
4862  	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4863  	 * as we're not fully set-up yet.
4864  	 */
4865  	p->recent_used_cpu = task_cpu(p);
4866  	rseq_migrate(p);
4867  	__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4868  #endif
4869  	rq = __task_rq_lock(p, &rf);
4870  	update_rq_clock(rq);
4871  	post_init_entity_util_avg(p);
4872  
4873  	activate_task(rq, p, ENQUEUE_NOCLOCK);
4874  	trace_sched_wakeup_new(p);
4875  	wakeup_preempt(rq, p, WF_FORK);
4876  #ifdef CONFIG_SMP
4877  	if (p->sched_class->task_woken) {
4878  		/*
4879  		 * Nothing relies on rq->lock after this, so it's fine to
4880  		 * drop it.
4881  		 */
4882  		rq_unpin_lock(rq, &rf);
4883  		p->sched_class->task_woken(rq, p);
4884  		rq_repin_lock(rq, &rf);
4885  	}
4886  #endif
4887  	task_rq_unlock(rq, p, &rf);
4888  }
4889  
4890  #ifdef CONFIG_PREEMPT_NOTIFIERS
4891  
4892  static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4893  
preempt_notifier_inc(void)4894  void preempt_notifier_inc(void)
4895  {
4896  	static_branch_inc(&preempt_notifier_key);
4897  }
4898  EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4899  
preempt_notifier_dec(void)4900  void preempt_notifier_dec(void)
4901  {
4902  	static_branch_dec(&preempt_notifier_key);
4903  }
4904  EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4905  
4906  /**
4907   * preempt_notifier_register - tell me when current is being preempted & rescheduled
4908   * @notifier: notifier struct to register
4909   */
preempt_notifier_register(struct preempt_notifier * notifier)4910  void preempt_notifier_register(struct preempt_notifier *notifier)
4911  {
4912  	if (!static_branch_unlikely(&preempt_notifier_key))
4913  		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4914  
4915  	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4916  }
4917  EXPORT_SYMBOL_GPL(preempt_notifier_register);
4918  
4919  /**
4920   * preempt_notifier_unregister - no longer interested in preemption notifications
4921   * @notifier: notifier struct to unregister
4922   *
4923   * This is *not* safe to call from within a preemption notifier.
4924   */
preempt_notifier_unregister(struct preempt_notifier * notifier)4925  void preempt_notifier_unregister(struct preempt_notifier *notifier)
4926  {
4927  	hlist_del(&notifier->link);
4928  }
4929  EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4930  
__fire_sched_in_preempt_notifiers(struct task_struct * curr)4931  static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4932  {
4933  	struct preempt_notifier *notifier;
4934  
4935  	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4936  		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4937  }
4938  
fire_sched_in_preempt_notifiers(struct task_struct * curr)4939  static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4940  {
4941  	if (static_branch_unlikely(&preempt_notifier_key))
4942  		__fire_sched_in_preempt_notifiers(curr);
4943  }
4944  
4945  static void
__fire_sched_out_preempt_notifiers(struct task_struct * curr,struct task_struct * next)4946  __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4947  				   struct task_struct *next)
4948  {
4949  	struct preempt_notifier *notifier;
4950  
4951  	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4952  		notifier->ops->sched_out(notifier, next);
4953  }
4954  
4955  static __always_inline void
fire_sched_out_preempt_notifiers(struct task_struct * curr,struct task_struct * next)4956  fire_sched_out_preempt_notifiers(struct task_struct *curr,
4957  				 struct task_struct *next)
4958  {
4959  	if (static_branch_unlikely(&preempt_notifier_key))
4960  		__fire_sched_out_preempt_notifiers(curr, next);
4961  }
4962  
4963  #else /* !CONFIG_PREEMPT_NOTIFIERS */
4964  
fire_sched_in_preempt_notifiers(struct task_struct * curr)4965  static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4966  {
4967  }
4968  
4969  static inline void
fire_sched_out_preempt_notifiers(struct task_struct * curr,struct task_struct * next)4970  fire_sched_out_preempt_notifiers(struct task_struct *curr,
4971  				 struct task_struct *next)
4972  {
4973  }
4974  
4975  #endif /* CONFIG_PREEMPT_NOTIFIERS */
4976  
prepare_task(struct task_struct * next)4977  static inline void prepare_task(struct task_struct *next)
4978  {
4979  #ifdef CONFIG_SMP
4980  	/*
4981  	 * Claim the task as running, we do this before switching to it
4982  	 * such that any running task will have this set.
4983  	 *
4984  	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4985  	 * its ordering comment.
4986  	 */
4987  	WRITE_ONCE(next->on_cpu, 1);
4988  #endif
4989  }
4990  
finish_task(struct task_struct * prev)4991  static inline void finish_task(struct task_struct *prev)
4992  {
4993  #ifdef CONFIG_SMP
4994  	/*
4995  	 * This must be the very last reference to @prev from this CPU. After
4996  	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4997  	 * must ensure this doesn't happen until the switch is completely
4998  	 * finished.
4999  	 *
5000  	 * In particular, the load of prev->state in finish_task_switch() must
5001  	 * happen before this.
5002  	 *
5003  	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5004  	 */
5005  	smp_store_release(&prev->on_cpu, 0);
5006  #endif
5007  }
5008  
5009  #ifdef CONFIG_SMP
5010  
do_balance_callbacks(struct rq * rq,struct balance_callback * head)5011  static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5012  {
5013  	void (*func)(struct rq *rq);
5014  	struct balance_callback *next;
5015  
5016  	lockdep_assert_rq_held(rq);
5017  
5018  	while (head) {
5019  		func = (void (*)(struct rq *))head->func;
5020  		next = head->next;
5021  		head->next = NULL;
5022  		head = next;
5023  
5024  		func(rq);
5025  	}
5026  }
5027  
5028  static void balance_push(struct rq *rq);
5029  
5030  /*
5031   * balance_push_callback is a right abuse of the callback interface and plays
5032   * by significantly different rules.
5033   *
5034   * Where the normal balance_callback's purpose is to be ran in the same context
5035   * that queued it (only later, when it's safe to drop rq->lock again),
5036   * balance_push_callback is specifically targeted at __schedule().
5037   *
5038   * This abuse is tolerated because it places all the unlikely/odd cases behind
5039   * a single test, namely: rq->balance_callback == NULL.
5040   */
5041  struct balance_callback balance_push_callback = {
5042  	.next = NULL,
5043  	.func = balance_push,
5044  };
5045  
5046  static inline struct balance_callback *
__splice_balance_callbacks(struct rq * rq,bool split)5047  __splice_balance_callbacks(struct rq *rq, bool split)
5048  {
5049  	struct balance_callback *head = rq->balance_callback;
5050  
5051  	if (likely(!head))
5052  		return NULL;
5053  
5054  	lockdep_assert_rq_held(rq);
5055  	/*
5056  	 * Must not take balance_push_callback off the list when
5057  	 * splice_balance_callbacks() and balance_callbacks() are not
5058  	 * in the same rq->lock section.
5059  	 *
5060  	 * In that case it would be possible for __schedule() to interleave
5061  	 * and observe the list empty.
5062  	 */
5063  	if (split && head == &balance_push_callback)
5064  		head = NULL;
5065  	else
5066  		rq->balance_callback = NULL;
5067  
5068  	return head;
5069  }
5070  
splice_balance_callbacks(struct rq * rq)5071  static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
5072  {
5073  	return __splice_balance_callbacks(rq, true);
5074  }
5075  
__balance_callbacks(struct rq * rq)5076  static void __balance_callbacks(struct rq *rq)
5077  {
5078  	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5079  }
5080  
balance_callbacks(struct rq * rq,struct balance_callback * head)5081  static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5082  {
5083  	unsigned long flags;
5084  
5085  	if (unlikely(head)) {
5086  		raw_spin_rq_lock_irqsave(rq, flags);
5087  		do_balance_callbacks(rq, head);
5088  		raw_spin_rq_unlock_irqrestore(rq, flags);
5089  	}
5090  }
5091  
5092  #else
5093  
__balance_callbacks(struct rq * rq)5094  static inline void __balance_callbacks(struct rq *rq)
5095  {
5096  }
5097  
splice_balance_callbacks(struct rq * rq)5098  static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
5099  {
5100  	return NULL;
5101  }
5102  
balance_callbacks(struct rq * rq,struct balance_callback * head)5103  static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5104  {
5105  }
5106  
5107  #endif
5108  
5109  static inline void
prepare_lock_switch(struct rq * rq,struct task_struct * next,struct rq_flags * rf)5110  prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5111  {
5112  	/*
5113  	 * Since the runqueue lock will be released by the next
5114  	 * task (which is an invalid locking op but in the case
5115  	 * of the scheduler it's an obvious special-case), so we
5116  	 * do an early lockdep release here:
5117  	 */
5118  	rq_unpin_lock(rq, rf);
5119  	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5120  #ifdef CONFIG_DEBUG_SPINLOCK
5121  	/* this is a valid case when another task releases the spinlock */
5122  	rq_lockp(rq)->owner = next;
5123  #endif
5124  }
5125  
finish_lock_switch(struct rq * rq)5126  static inline void finish_lock_switch(struct rq *rq)
5127  {
5128  	/*
5129  	 * If we are tracking spinlock dependencies then we have to
5130  	 * fix up the runqueue lock - which gets 'carried over' from
5131  	 * prev into current:
5132  	 */
5133  	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5134  	__balance_callbacks(rq);
5135  	raw_spin_rq_unlock_irq(rq);
5136  }
5137  
5138  /*
5139   * NOP if the arch has not defined these:
5140   */
5141  
5142  #ifndef prepare_arch_switch
5143  # define prepare_arch_switch(next)	do { } while (0)
5144  #endif
5145  
5146  #ifndef finish_arch_post_lock_switch
5147  # define finish_arch_post_lock_switch()	do { } while (0)
5148  #endif
5149  
kmap_local_sched_out(void)5150  static inline void kmap_local_sched_out(void)
5151  {
5152  #ifdef CONFIG_KMAP_LOCAL
5153  	if (unlikely(current->kmap_ctrl.idx))
5154  		__kmap_local_sched_out();
5155  #endif
5156  }
5157  
kmap_local_sched_in(void)5158  static inline void kmap_local_sched_in(void)
5159  {
5160  #ifdef CONFIG_KMAP_LOCAL
5161  	if (unlikely(current->kmap_ctrl.idx))
5162  		__kmap_local_sched_in();
5163  #endif
5164  }
5165  
5166  /**
5167   * prepare_task_switch - prepare to switch tasks
5168   * @rq: the runqueue preparing to switch
5169   * @prev: the current task that is being switched out
5170   * @next: the task we are going to switch to.
5171   *
5172   * This is called with the rq lock held and interrupts off. It must
5173   * be paired with a subsequent finish_task_switch after the context
5174   * switch.
5175   *
5176   * prepare_task_switch sets up locking and calls architecture specific
5177   * hooks.
5178   */
5179  static inline void
prepare_task_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next)5180  prepare_task_switch(struct rq *rq, struct task_struct *prev,
5181  		    struct task_struct *next)
5182  {
5183  	kcov_prepare_switch(prev);
5184  	sched_info_switch(rq, prev, next);
5185  	perf_event_task_sched_out(prev, next);
5186  	rseq_preempt(prev);
5187  	fire_sched_out_preempt_notifiers(prev, next);
5188  	kmap_local_sched_out();
5189  	prepare_task(next);
5190  	prepare_arch_switch(next);
5191  }
5192  
5193  /**
5194   * finish_task_switch - clean up after a task-switch
5195   * @prev: the thread we just switched away from.
5196   *
5197   * finish_task_switch must be called after the context switch, paired
5198   * with a prepare_task_switch call before the context switch.
5199   * finish_task_switch will reconcile locking set up by prepare_task_switch,
5200   * and do any other architecture-specific cleanup actions.
5201   *
5202   * Note that we may have delayed dropping an mm in context_switch(). If
5203   * so, we finish that here outside of the runqueue lock. (Doing it
5204   * with the lock held can cause deadlocks; see schedule() for
5205   * details.)
5206   *
5207   * The context switch have flipped the stack from under us and restored the
5208   * local variables which were saved when this task called schedule() in the
5209   * past. prev == current is still correct but we need to recalculate this_rq
5210   * because prev may have moved to another CPU.
5211   */
finish_task_switch(struct task_struct * prev)5212  static struct rq *finish_task_switch(struct task_struct *prev)
5213  	__releases(rq->lock)
5214  {
5215  	struct rq *rq = this_rq();
5216  	struct mm_struct *mm = rq->prev_mm;
5217  	unsigned int prev_state;
5218  
5219  	/*
5220  	 * The previous task will have left us with a preempt_count of 2
5221  	 * because it left us after:
5222  	 *
5223  	 *	schedule()
5224  	 *	  preempt_disable();			// 1
5225  	 *	  __schedule()
5226  	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5227  	 *
5228  	 * Also, see FORK_PREEMPT_COUNT.
5229  	 */
5230  	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5231  		      "corrupted preempt_count: %s/%d/0x%x\n",
5232  		      current->comm, current->pid, preempt_count()))
5233  		preempt_count_set(FORK_PREEMPT_COUNT);
5234  
5235  	rq->prev_mm = NULL;
5236  
5237  	/*
5238  	 * A task struct has one reference for the use as "current".
5239  	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5240  	 * schedule one last time. The schedule call will never return, and
5241  	 * the scheduled task must drop that reference.
5242  	 *
5243  	 * We must observe prev->state before clearing prev->on_cpu (in
5244  	 * finish_task), otherwise a concurrent wakeup can get prev
5245  	 * running on another CPU and we could rave with its RUNNING -> DEAD
5246  	 * transition, resulting in a double drop.
5247  	 */
5248  	prev_state = READ_ONCE(prev->__state);
5249  	vtime_task_switch(prev);
5250  	perf_event_task_sched_in(prev, current);
5251  	finish_task(prev);
5252  	tick_nohz_task_switch();
5253  	finish_lock_switch(rq);
5254  	finish_arch_post_lock_switch();
5255  	kcov_finish_switch(current);
5256  	/*
5257  	 * kmap_local_sched_out() is invoked with rq::lock held and
5258  	 * interrupts disabled. There is no requirement for that, but the
5259  	 * sched out code does not have an interrupt enabled section.
5260  	 * Restoring the maps on sched in does not require interrupts being
5261  	 * disabled either.
5262  	 */
5263  	kmap_local_sched_in();
5264  
5265  	fire_sched_in_preempt_notifiers(current);
5266  	/*
5267  	 * When switching through a kernel thread, the loop in
5268  	 * membarrier_{private,global}_expedited() may have observed that
5269  	 * kernel thread and not issued an IPI. It is therefore possible to
5270  	 * schedule between user->kernel->user threads without passing though
5271  	 * switch_mm(). Membarrier requires a barrier after storing to
5272  	 * rq->curr, before returning to userspace, so provide them here:
5273  	 *
5274  	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5275  	 *   provided by mmdrop_lazy_tlb(),
5276  	 * - a sync_core for SYNC_CORE.
5277  	 */
5278  	if (mm) {
5279  		membarrier_mm_sync_core_before_usermode(mm);
5280  		mmdrop_lazy_tlb_sched(mm);
5281  	}
5282  
5283  	if (unlikely(prev_state == TASK_DEAD)) {
5284  		if (prev->sched_class->task_dead)
5285  			prev->sched_class->task_dead(prev);
5286  
5287  		/* Task is done with its stack. */
5288  		put_task_stack(prev);
5289  
5290  		put_task_struct_rcu_user(prev);
5291  	}
5292  
5293  	return rq;
5294  }
5295  
5296  /**
5297   * schedule_tail - first thing a freshly forked thread must call.
5298   * @prev: the thread we just switched away from.
5299   */
schedule_tail(struct task_struct * prev)5300  asmlinkage __visible void schedule_tail(struct task_struct *prev)
5301  	__releases(rq->lock)
5302  {
5303  	/*
5304  	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5305  	 * finish_task_switch() for details.
5306  	 *
5307  	 * finish_task_switch() will drop rq->lock() and lower preempt_count
5308  	 * and the preempt_enable() will end up enabling preemption (on
5309  	 * PREEMPT_COUNT kernels).
5310  	 */
5311  
5312  	finish_task_switch(prev);
5313  	preempt_enable();
5314  
5315  	if (current->set_child_tid)
5316  		put_user(task_pid_vnr(current), current->set_child_tid);
5317  
5318  	calculate_sigpending();
5319  }
5320  
5321  /*
5322   * context_switch - switch to the new MM and the new thread's register state.
5323   */
5324  static __always_inline struct rq *
context_switch(struct rq * rq,struct task_struct * prev,struct task_struct * next,struct rq_flags * rf)5325  context_switch(struct rq *rq, struct task_struct *prev,
5326  	       struct task_struct *next, struct rq_flags *rf)
5327  {
5328  	prepare_task_switch(rq, prev, next);
5329  
5330  	/*
5331  	 * For paravirt, this is coupled with an exit in switch_to to
5332  	 * combine the page table reload and the switch backend into
5333  	 * one hypercall.
5334  	 */
5335  	arch_start_context_switch(prev);
5336  
5337  	/*
5338  	 * kernel -> kernel   lazy + transfer active
5339  	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
5340  	 *
5341  	 * kernel ->   user   switch + mmdrop_lazy_tlb() active
5342  	 *   user ->   user   switch
5343  	 *
5344  	 * switch_mm_cid() needs to be updated if the barriers provided
5345  	 * by context_switch() are modified.
5346  	 */
5347  	if (!next->mm) {                                // to kernel
5348  		enter_lazy_tlb(prev->active_mm, next);
5349  
5350  		next->active_mm = prev->active_mm;
5351  		if (prev->mm)                           // from user
5352  			mmgrab_lazy_tlb(prev->active_mm);
5353  		else
5354  			prev->active_mm = NULL;
5355  	} else {                                        // to user
5356  		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5357  		/*
5358  		 * sys_membarrier() requires an smp_mb() between setting
5359  		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5360  		 *
5361  		 * The below provides this either through switch_mm(), or in
5362  		 * case 'prev->active_mm == next->mm' through
5363  		 * finish_task_switch()'s mmdrop().
5364  		 */
5365  		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5366  		lru_gen_use_mm(next->mm);
5367  
5368  		if (!prev->mm) {                        // from kernel
5369  			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5370  			rq->prev_mm = prev->active_mm;
5371  			prev->active_mm = NULL;
5372  		}
5373  	}
5374  
5375  	/* switch_mm_cid() requires the memory barriers above. */
5376  	switch_mm_cid(rq, prev, next);
5377  
5378  	prepare_lock_switch(rq, next, rf);
5379  
5380  	/* Here we just switch the register state and the stack. */
5381  	switch_to(prev, next, prev);
5382  	barrier();
5383  
5384  	return finish_task_switch(prev);
5385  }
5386  
5387  /*
5388   * nr_running and nr_context_switches:
5389   *
5390   * externally visible scheduler statistics: current number of runnable
5391   * threads, total number of context switches performed since bootup.
5392   */
nr_running(void)5393  unsigned int nr_running(void)
5394  {
5395  	unsigned int i, sum = 0;
5396  
5397  	for_each_online_cpu(i)
5398  		sum += cpu_rq(i)->nr_running;
5399  
5400  	return sum;
5401  }
5402  
5403  /*
5404   * Check if only the current task is running on the CPU.
5405   *
5406   * Caution: this function does not check that the caller has disabled
5407   * preemption, thus the result might have a time-of-check-to-time-of-use
5408   * race.  The caller is responsible to use it correctly, for example:
5409   *
5410   * - from a non-preemptible section (of course)
5411   *
5412   * - from a thread that is bound to a single CPU
5413   *
5414   * - in a loop with very short iterations (e.g. a polling loop)
5415   */
single_task_running(void)5416  bool single_task_running(void)
5417  {
5418  	return raw_rq()->nr_running == 1;
5419  }
5420  EXPORT_SYMBOL(single_task_running);
5421  
nr_context_switches_cpu(int cpu)5422  unsigned long long nr_context_switches_cpu(int cpu)
5423  {
5424  	return cpu_rq(cpu)->nr_switches;
5425  }
5426  
nr_context_switches(void)5427  unsigned long long nr_context_switches(void)
5428  {
5429  	int i;
5430  	unsigned long long sum = 0;
5431  
5432  	for_each_possible_cpu(i)
5433  		sum += cpu_rq(i)->nr_switches;
5434  
5435  	return sum;
5436  }
5437  
5438  /*
5439   * Consumers of these two interfaces, like for example the cpuidle menu
5440   * governor, are using nonsensical data. Preferring shallow idle state selection
5441   * for a CPU that has IO-wait which might not even end up running the task when
5442   * it does become runnable.
5443   */
5444  
nr_iowait_cpu(int cpu)5445  unsigned int nr_iowait_cpu(int cpu)
5446  {
5447  	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5448  }
5449  
5450  /*
5451   * IO-wait accounting, and how it's mostly bollocks (on SMP).
5452   *
5453   * The idea behind IO-wait account is to account the idle time that we could
5454   * have spend running if it were not for IO. That is, if we were to improve the
5455   * storage performance, we'd have a proportional reduction in IO-wait time.
5456   *
5457   * This all works nicely on UP, where, when a task blocks on IO, we account
5458   * idle time as IO-wait, because if the storage were faster, it could've been
5459   * running and we'd not be idle.
5460   *
5461   * This has been extended to SMP, by doing the same for each CPU. This however
5462   * is broken.
5463   *
5464   * Imagine for instance the case where two tasks block on one CPU, only the one
5465   * CPU will have IO-wait accounted, while the other has regular idle. Even
5466   * though, if the storage were faster, both could've ran at the same time,
5467   * utilising both CPUs.
5468   *
5469   * This means, that when looking globally, the current IO-wait accounting on
5470   * SMP is a lower bound, by reason of under accounting.
5471   *
5472   * Worse, since the numbers are provided per CPU, they are sometimes
5473   * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5474   * associated with any one particular CPU, it can wake to another CPU than it
5475   * blocked on. This means the per CPU IO-wait number is meaningless.
5476   *
5477   * Task CPU affinities can make all that even more 'interesting'.
5478   */
5479  
nr_iowait(void)5480  unsigned int nr_iowait(void)
5481  {
5482  	unsigned int i, sum = 0;
5483  
5484  	for_each_possible_cpu(i)
5485  		sum += nr_iowait_cpu(i);
5486  
5487  	return sum;
5488  }
5489  
5490  #ifdef CONFIG_SMP
5491  
5492  /*
5493   * sched_exec - execve() is a valuable balancing opportunity, because at
5494   * this point the task has the smallest effective memory and cache footprint.
5495   */
sched_exec(void)5496  void sched_exec(void)
5497  {
5498  	struct task_struct *p = current;
5499  	struct migration_arg arg;
5500  	int dest_cpu;
5501  
5502  	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5503  		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5504  		if (dest_cpu == smp_processor_id())
5505  			return;
5506  
5507  		if (unlikely(!cpu_active(dest_cpu)))
5508  			return;
5509  
5510  		arg = (struct migration_arg){ p, dest_cpu };
5511  	}
5512  	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5513  }
5514  
5515  #endif
5516  
5517  DEFINE_PER_CPU(struct kernel_stat, kstat);
5518  DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5519  
5520  EXPORT_PER_CPU_SYMBOL(kstat);
5521  EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5522  
5523  /*
5524   * The function fair_sched_class.update_curr accesses the struct curr
5525   * and its field curr->exec_start; when called from task_sched_runtime(),
5526   * we observe a high rate of cache misses in practice.
5527   * Prefetching this data results in improved performance.
5528   */
prefetch_curr_exec_start(struct task_struct * p)5529  static inline void prefetch_curr_exec_start(struct task_struct *p)
5530  {
5531  #ifdef CONFIG_FAIR_GROUP_SCHED
5532  	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
5533  #else
5534  	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
5535  #endif
5536  	prefetch(curr);
5537  	prefetch(&curr->exec_start);
5538  }
5539  
5540  /*
5541   * Return accounted runtime for the task.
5542   * In case the task is currently running, return the runtime plus current's
5543   * pending runtime that have not been accounted yet.
5544   */
task_sched_runtime(struct task_struct * p)5545  unsigned long long task_sched_runtime(struct task_struct *p)
5546  {
5547  	struct rq_flags rf;
5548  	struct rq *rq;
5549  	u64 ns;
5550  
5551  #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5552  	/*
5553  	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5554  	 * So we have a optimization chance when the task's delta_exec is 0.
5555  	 * Reading ->on_cpu is racy, but this is ok.
5556  	 *
5557  	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5558  	 * If we race with it entering CPU, unaccounted time is 0. This is
5559  	 * indistinguishable from the read occurring a few cycles earlier.
5560  	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5561  	 * been accounted, so we're correct here as well.
5562  	 */
5563  	if (!p->on_cpu || !task_on_rq_queued(p))
5564  		return p->se.sum_exec_runtime;
5565  #endif
5566  
5567  	rq = task_rq_lock(p, &rf);
5568  	/*
5569  	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
5570  	 * project cycles that may never be accounted to this
5571  	 * thread, breaking clock_gettime().
5572  	 */
5573  	if (task_current(rq, p) && task_on_rq_queued(p)) {
5574  		prefetch_curr_exec_start(p);
5575  		update_rq_clock(rq);
5576  		p->sched_class->update_curr(rq);
5577  	}
5578  	ns = p->se.sum_exec_runtime;
5579  	task_rq_unlock(rq, p, &rf);
5580  
5581  	return ns;
5582  }
5583  
5584  #ifdef CONFIG_SCHED_DEBUG
cpu_resched_latency(struct rq * rq)5585  static u64 cpu_resched_latency(struct rq *rq)
5586  {
5587  	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5588  	u64 resched_latency, now = rq_clock(rq);
5589  	static bool warned_once;
5590  
5591  	if (sysctl_resched_latency_warn_once && warned_once)
5592  		return 0;
5593  
5594  	if (!need_resched() || !latency_warn_ms)
5595  		return 0;
5596  
5597  	if (system_state == SYSTEM_BOOTING)
5598  		return 0;
5599  
5600  	if (!rq->last_seen_need_resched_ns) {
5601  		rq->last_seen_need_resched_ns = now;
5602  		rq->ticks_without_resched = 0;
5603  		return 0;
5604  	}
5605  
5606  	rq->ticks_without_resched++;
5607  	resched_latency = now - rq->last_seen_need_resched_ns;
5608  	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5609  		return 0;
5610  
5611  	warned_once = true;
5612  
5613  	return resched_latency;
5614  }
5615  
setup_resched_latency_warn_ms(char * str)5616  static int __init setup_resched_latency_warn_ms(char *str)
5617  {
5618  	long val;
5619  
5620  	if ((kstrtol(str, 0, &val))) {
5621  		pr_warn("Unable to set resched_latency_warn_ms\n");
5622  		return 1;
5623  	}
5624  
5625  	sysctl_resched_latency_warn_ms = val;
5626  	return 1;
5627  }
5628  __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5629  #else
cpu_resched_latency(struct rq * rq)5630  static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5631  #endif /* CONFIG_SCHED_DEBUG */
5632  
5633  /*
5634   * This function gets called by the timer code, with HZ frequency.
5635   * We call it with interrupts disabled.
5636   */
scheduler_tick(void)5637  void scheduler_tick(void)
5638  {
5639  	int cpu = smp_processor_id();
5640  	struct rq *rq = cpu_rq(cpu);
5641  	struct task_struct *curr;
5642  	struct rq_flags rf;
5643  	unsigned long thermal_pressure;
5644  	u64 resched_latency;
5645  
5646  	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5647  		arch_scale_freq_tick();
5648  
5649  	sched_clock_tick();
5650  
5651  	rq_lock(rq, &rf);
5652  
5653  	curr = rq->curr;
5654  	psi_account_irqtime(rq, curr, NULL);
5655  
5656  	update_rq_clock(rq);
5657  	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
5658  	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
5659  	curr->sched_class->task_tick(rq, curr, 0);
5660  	if (sched_feat(LATENCY_WARN))
5661  		resched_latency = cpu_resched_latency(rq);
5662  	calc_global_load_tick(rq);
5663  	sched_core_tick(rq);
5664  	task_tick_mm_cid(rq, curr);
5665  
5666  	rq_unlock(rq, &rf);
5667  
5668  	if (sched_feat(LATENCY_WARN) && resched_latency)
5669  		resched_latency_warn(cpu, resched_latency);
5670  
5671  	perf_event_task_tick();
5672  
5673  	if (curr->flags & PF_WQ_WORKER)
5674  		wq_worker_tick(curr);
5675  
5676  #ifdef CONFIG_SMP
5677  	rq->idle_balance = idle_cpu(cpu);
5678  	trigger_load_balance(rq);
5679  #endif
5680  }
5681  
5682  #ifdef CONFIG_NO_HZ_FULL
5683  
5684  struct tick_work {
5685  	int			cpu;
5686  	atomic_t		state;
5687  	struct delayed_work	work;
5688  };
5689  /* Values for ->state, see diagram below. */
5690  #define TICK_SCHED_REMOTE_OFFLINE	0
5691  #define TICK_SCHED_REMOTE_OFFLINING	1
5692  #define TICK_SCHED_REMOTE_RUNNING	2
5693  
5694  /*
5695   * State diagram for ->state:
5696   *
5697   *
5698   *          TICK_SCHED_REMOTE_OFFLINE
5699   *                    |   ^
5700   *                    |   |
5701   *                    |   | sched_tick_remote()
5702   *                    |   |
5703   *                    |   |
5704   *                    +--TICK_SCHED_REMOTE_OFFLINING
5705   *                    |   ^
5706   *                    |   |
5707   * sched_tick_start() |   | sched_tick_stop()
5708   *                    |   |
5709   *                    V   |
5710   *          TICK_SCHED_REMOTE_RUNNING
5711   *
5712   *
5713   * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5714   * and sched_tick_start() are happy to leave the state in RUNNING.
5715   */
5716  
5717  static struct tick_work __percpu *tick_work_cpu;
5718  
sched_tick_remote(struct work_struct * work)5719  static void sched_tick_remote(struct work_struct *work)
5720  {
5721  	struct delayed_work *dwork = to_delayed_work(work);
5722  	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5723  	int cpu = twork->cpu;
5724  	struct rq *rq = cpu_rq(cpu);
5725  	int os;
5726  
5727  	/*
5728  	 * Handle the tick only if it appears the remote CPU is running in full
5729  	 * dynticks mode. The check is racy by nature, but missing a tick or
5730  	 * having one too much is no big deal because the scheduler tick updates
5731  	 * statistics and checks timeslices in a time-independent way, regardless
5732  	 * of when exactly it is running.
5733  	 */
5734  	if (tick_nohz_tick_stopped_cpu(cpu)) {
5735  		guard(rq_lock_irq)(rq);
5736  		struct task_struct *curr = rq->curr;
5737  
5738  		if (cpu_online(cpu)) {
5739  			update_rq_clock(rq);
5740  
5741  			if (!is_idle_task(curr)) {
5742  				/*
5743  				 * Make sure the next tick runs within a
5744  				 * reasonable amount of time.
5745  				 */
5746  				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5747  				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5748  			}
5749  			curr->sched_class->task_tick(rq, curr, 0);
5750  
5751  			calc_load_nohz_remote(rq);
5752  		}
5753  	}
5754  
5755  	/*
5756  	 * Run the remote tick once per second (1Hz). This arbitrary
5757  	 * frequency is large enough to avoid overload but short enough
5758  	 * to keep scheduler internal stats reasonably up to date.  But
5759  	 * first update state to reflect hotplug activity if required.
5760  	 */
5761  	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5762  	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5763  	if (os == TICK_SCHED_REMOTE_RUNNING)
5764  		queue_delayed_work(system_unbound_wq, dwork, HZ);
5765  }
5766  
sched_tick_start(int cpu)5767  static void sched_tick_start(int cpu)
5768  {
5769  	int os;
5770  	struct tick_work *twork;
5771  
5772  	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5773  		return;
5774  
5775  	WARN_ON_ONCE(!tick_work_cpu);
5776  
5777  	twork = per_cpu_ptr(tick_work_cpu, cpu);
5778  	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5779  	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5780  	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5781  		twork->cpu = cpu;
5782  		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5783  		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5784  	}
5785  }
5786  
5787  #ifdef CONFIG_HOTPLUG_CPU
sched_tick_stop(int cpu)5788  static void sched_tick_stop(int cpu)
5789  {
5790  	struct tick_work *twork;
5791  	int os;
5792  
5793  	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5794  		return;
5795  
5796  	WARN_ON_ONCE(!tick_work_cpu);
5797  
5798  	twork = per_cpu_ptr(tick_work_cpu, cpu);
5799  	/* There cannot be competing actions, but don't rely on stop-machine. */
5800  	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5801  	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5802  	/* Don't cancel, as this would mess up the state machine. */
5803  }
5804  #endif /* CONFIG_HOTPLUG_CPU */
5805  
sched_tick_offload_init(void)5806  int __init sched_tick_offload_init(void)
5807  {
5808  	tick_work_cpu = alloc_percpu(struct tick_work);
5809  	BUG_ON(!tick_work_cpu);
5810  	return 0;
5811  }
5812  
5813  #else /* !CONFIG_NO_HZ_FULL */
sched_tick_start(int cpu)5814  static inline void sched_tick_start(int cpu) { }
sched_tick_stop(int cpu)5815  static inline void sched_tick_stop(int cpu) { }
5816  #endif
5817  
5818  #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5819  				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5820  /*
5821   * If the value passed in is equal to the current preempt count
5822   * then we just disabled preemption. Start timing the latency.
5823   */
preempt_latency_start(int val)5824  static inline void preempt_latency_start(int val)
5825  {
5826  	if (preempt_count() == val) {
5827  		unsigned long ip = get_lock_parent_ip();
5828  #ifdef CONFIG_DEBUG_PREEMPT
5829  		current->preempt_disable_ip = ip;
5830  #endif
5831  		trace_preempt_off(CALLER_ADDR0, ip);
5832  	}
5833  }
5834  
preempt_count_add(int val)5835  void preempt_count_add(int val)
5836  {
5837  #ifdef CONFIG_DEBUG_PREEMPT
5838  	/*
5839  	 * Underflow?
5840  	 */
5841  	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5842  		return;
5843  #endif
5844  	__preempt_count_add(val);
5845  #ifdef CONFIG_DEBUG_PREEMPT
5846  	/*
5847  	 * Spinlock count overflowing soon?
5848  	 */
5849  	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5850  				PREEMPT_MASK - 10);
5851  #endif
5852  	preempt_latency_start(val);
5853  }
5854  EXPORT_SYMBOL(preempt_count_add);
5855  NOKPROBE_SYMBOL(preempt_count_add);
5856  
5857  /*
5858   * If the value passed in equals to the current preempt count
5859   * then we just enabled preemption. Stop timing the latency.
5860   */
preempt_latency_stop(int val)5861  static inline void preempt_latency_stop(int val)
5862  {
5863  	if (preempt_count() == val)
5864  		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5865  }
5866  
preempt_count_sub(int val)5867  void preempt_count_sub(int val)
5868  {
5869  #ifdef CONFIG_DEBUG_PREEMPT
5870  	/*
5871  	 * Underflow?
5872  	 */
5873  	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5874  		return;
5875  	/*
5876  	 * Is the spinlock portion underflowing?
5877  	 */
5878  	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5879  			!(preempt_count() & PREEMPT_MASK)))
5880  		return;
5881  #endif
5882  
5883  	preempt_latency_stop(val);
5884  	__preempt_count_sub(val);
5885  }
5886  EXPORT_SYMBOL(preempt_count_sub);
5887  NOKPROBE_SYMBOL(preempt_count_sub);
5888  
5889  #else
preempt_latency_start(int val)5890  static inline void preempt_latency_start(int val) { }
preempt_latency_stop(int val)5891  static inline void preempt_latency_stop(int val) { }
5892  #endif
5893  
get_preempt_disable_ip(struct task_struct * p)5894  static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5895  {
5896  #ifdef CONFIG_DEBUG_PREEMPT
5897  	return p->preempt_disable_ip;
5898  #else
5899  	return 0;
5900  #endif
5901  }
5902  
5903  /*
5904   * Print scheduling while atomic bug:
5905   */
__schedule_bug(struct task_struct * prev)5906  static noinline void __schedule_bug(struct task_struct *prev)
5907  {
5908  	/* Save this before calling printk(), since that will clobber it */
5909  	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5910  
5911  	if (oops_in_progress)
5912  		return;
5913  
5914  	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5915  		prev->comm, prev->pid, preempt_count());
5916  
5917  	debug_show_held_locks(prev);
5918  	print_modules();
5919  	if (irqs_disabled())
5920  		print_irqtrace_events(prev);
5921  	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
5922  	    && in_atomic_preempt_off()) {
5923  		pr_err("Preemption disabled at:");
5924  		print_ip_sym(KERN_ERR, preempt_disable_ip);
5925  	}
5926  	check_panic_on_warn("scheduling while atomic");
5927  
5928  	dump_stack();
5929  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5930  }
5931  
5932  /*
5933   * Various schedule()-time debugging checks and statistics:
5934   */
schedule_debug(struct task_struct * prev,bool preempt)5935  static inline void schedule_debug(struct task_struct *prev, bool preempt)
5936  {
5937  #ifdef CONFIG_SCHED_STACK_END_CHECK
5938  	if (task_stack_end_corrupted(prev))
5939  		panic("corrupted stack end detected inside scheduler\n");
5940  
5941  	if (task_scs_end_corrupted(prev))
5942  		panic("corrupted shadow stack detected inside scheduler\n");
5943  #endif
5944  
5945  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5946  	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5947  		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5948  			prev->comm, prev->pid, prev->non_block_count);
5949  		dump_stack();
5950  		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5951  	}
5952  #endif
5953  
5954  	if (unlikely(in_atomic_preempt_off())) {
5955  		__schedule_bug(prev);
5956  		preempt_count_set(PREEMPT_DISABLED);
5957  	}
5958  	rcu_sleep_check();
5959  	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
5960  
5961  	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5962  
5963  	schedstat_inc(this_rq()->sched_count);
5964  }
5965  
put_prev_task_balance(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)5966  static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
5967  				  struct rq_flags *rf)
5968  {
5969  #ifdef CONFIG_SMP
5970  	const struct sched_class *class;
5971  	/*
5972  	 * We must do the balancing pass before put_prev_task(), such
5973  	 * that when we release the rq->lock the task is in the same
5974  	 * state as before we took rq->lock.
5975  	 *
5976  	 * We can terminate the balance pass as soon as we know there is
5977  	 * a runnable task of @class priority or higher.
5978  	 */
5979  	for_class_range(class, prev->sched_class, &idle_sched_class) {
5980  		if (class->balance(rq, prev, rf))
5981  			break;
5982  	}
5983  #endif
5984  
5985  	put_prev_task(rq, prev);
5986  }
5987  
5988  /*
5989   * Pick up the highest-prio task:
5990   */
5991  static inline struct task_struct *
__pick_next_task(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)5992  __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5993  {
5994  	const struct sched_class *class;
5995  	struct task_struct *p;
5996  
5997  	/*
5998  	 * Optimization: we know that if all tasks are in the fair class we can
5999  	 * call that function directly, but only if the @prev task wasn't of a
6000  	 * higher scheduling class, because otherwise those lose the
6001  	 * opportunity to pull in more work from other CPUs.
6002  	 */
6003  	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6004  		   rq->nr_running == rq->cfs.h_nr_running)) {
6005  
6006  		p = pick_next_task_fair(rq, prev, rf);
6007  		if (unlikely(p == RETRY_TASK))
6008  			goto restart;
6009  
6010  		/* Assume the next prioritized class is idle_sched_class */
6011  		if (!p) {
6012  			put_prev_task(rq, prev);
6013  			p = pick_next_task_idle(rq);
6014  		}
6015  
6016  		return p;
6017  	}
6018  
6019  restart:
6020  	put_prev_task_balance(rq, prev, rf);
6021  
6022  	for_each_class(class) {
6023  		p = class->pick_next_task(rq);
6024  		if (p)
6025  			return p;
6026  	}
6027  
6028  	BUG(); /* The idle class should always have a runnable task. */
6029  }
6030  
6031  #ifdef CONFIG_SCHED_CORE
is_task_rq_idle(struct task_struct * t)6032  static inline bool is_task_rq_idle(struct task_struct *t)
6033  {
6034  	return (task_rq(t)->idle == t);
6035  }
6036  
cookie_equals(struct task_struct * a,unsigned long cookie)6037  static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6038  {
6039  	return is_task_rq_idle(a) || (a->core_cookie == cookie);
6040  }
6041  
cookie_match(struct task_struct * a,struct task_struct * b)6042  static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6043  {
6044  	if (is_task_rq_idle(a) || is_task_rq_idle(b))
6045  		return true;
6046  
6047  	return a->core_cookie == b->core_cookie;
6048  }
6049  
pick_task(struct rq * rq)6050  static inline struct task_struct *pick_task(struct rq *rq)
6051  {
6052  	const struct sched_class *class;
6053  	struct task_struct *p;
6054  
6055  	for_each_class(class) {
6056  		p = class->pick_task(rq);
6057  		if (p)
6058  			return p;
6059  	}
6060  
6061  	BUG(); /* The idle class should always have a runnable task. */
6062  }
6063  
6064  extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6065  
6066  static void queue_core_balance(struct rq *rq);
6067  
6068  static struct task_struct *
pick_next_task(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)6069  pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6070  {
6071  	struct task_struct *next, *p, *max = NULL;
6072  	const struct cpumask *smt_mask;
6073  	bool fi_before = false;
6074  	bool core_clock_updated = (rq == rq->core);
6075  	unsigned long cookie;
6076  	int i, cpu, occ = 0;
6077  	struct rq *rq_i;
6078  	bool need_sync;
6079  
6080  	if (!sched_core_enabled(rq))
6081  		return __pick_next_task(rq, prev, rf);
6082  
6083  	cpu = cpu_of(rq);
6084  
6085  	/* Stopper task is switching into idle, no need core-wide selection. */
6086  	if (cpu_is_offline(cpu)) {
6087  		/*
6088  		 * Reset core_pick so that we don't enter the fastpath when
6089  		 * coming online. core_pick would already be migrated to
6090  		 * another cpu during offline.
6091  		 */
6092  		rq->core_pick = NULL;
6093  		return __pick_next_task(rq, prev, rf);
6094  	}
6095  
6096  	/*
6097  	 * If there were no {en,de}queues since we picked (IOW, the task
6098  	 * pointers are all still valid), and we haven't scheduled the last
6099  	 * pick yet, do so now.
6100  	 *
6101  	 * rq->core_pick can be NULL if no selection was made for a CPU because
6102  	 * it was either offline or went offline during a sibling's core-wide
6103  	 * selection. In this case, do a core-wide selection.
6104  	 */
6105  	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6106  	    rq->core->core_pick_seq != rq->core_sched_seq &&
6107  	    rq->core_pick) {
6108  		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6109  
6110  		next = rq->core_pick;
6111  		if (next != prev) {
6112  			put_prev_task(rq, prev);
6113  			set_next_task(rq, next);
6114  		}
6115  
6116  		rq->core_pick = NULL;
6117  		goto out;
6118  	}
6119  
6120  	put_prev_task_balance(rq, prev, rf);
6121  
6122  	smt_mask = cpu_smt_mask(cpu);
6123  	need_sync = !!rq->core->core_cookie;
6124  
6125  	/* reset state */
6126  	rq->core->core_cookie = 0UL;
6127  	if (rq->core->core_forceidle_count) {
6128  		if (!core_clock_updated) {
6129  			update_rq_clock(rq->core);
6130  			core_clock_updated = true;
6131  		}
6132  		sched_core_account_forceidle(rq);
6133  		/* reset after accounting force idle */
6134  		rq->core->core_forceidle_start = 0;
6135  		rq->core->core_forceidle_count = 0;
6136  		rq->core->core_forceidle_occupation = 0;
6137  		need_sync = true;
6138  		fi_before = true;
6139  	}
6140  
6141  	/*
6142  	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6143  	 *
6144  	 * @task_seq guards the task state ({en,de}queues)
6145  	 * @pick_seq is the @task_seq we did a selection on
6146  	 * @sched_seq is the @pick_seq we scheduled
6147  	 *
6148  	 * However, preemptions can cause multiple picks on the same task set.
6149  	 * 'Fix' this by also increasing @task_seq for every pick.
6150  	 */
6151  	rq->core->core_task_seq++;
6152  
6153  	/*
6154  	 * Optimize for common case where this CPU has no cookies
6155  	 * and there are no cookied tasks running on siblings.
6156  	 */
6157  	if (!need_sync) {
6158  		next = pick_task(rq);
6159  		if (!next->core_cookie) {
6160  			rq->core_pick = NULL;
6161  			/*
6162  			 * For robustness, update the min_vruntime_fi for
6163  			 * unconstrained picks as well.
6164  			 */
6165  			WARN_ON_ONCE(fi_before);
6166  			task_vruntime_update(rq, next, false);
6167  			goto out_set_next;
6168  		}
6169  	}
6170  
6171  	/*
6172  	 * For each thread: do the regular task pick and find the max prio task
6173  	 * amongst them.
6174  	 *
6175  	 * Tie-break prio towards the current CPU
6176  	 */
6177  	for_each_cpu_wrap(i, smt_mask, cpu) {
6178  		rq_i = cpu_rq(i);
6179  
6180  		/*
6181  		 * Current cpu always has its clock updated on entrance to
6182  		 * pick_next_task(). If the current cpu is not the core,
6183  		 * the core may also have been updated above.
6184  		 */
6185  		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6186  			update_rq_clock(rq_i);
6187  
6188  		p = rq_i->core_pick = pick_task(rq_i);
6189  		if (!max || prio_less(max, p, fi_before))
6190  			max = p;
6191  	}
6192  
6193  	cookie = rq->core->core_cookie = max->core_cookie;
6194  
6195  	/*
6196  	 * For each thread: try and find a runnable task that matches @max or
6197  	 * force idle.
6198  	 */
6199  	for_each_cpu(i, smt_mask) {
6200  		rq_i = cpu_rq(i);
6201  		p = rq_i->core_pick;
6202  
6203  		if (!cookie_equals(p, cookie)) {
6204  			p = NULL;
6205  			if (cookie)
6206  				p = sched_core_find(rq_i, cookie);
6207  			if (!p)
6208  				p = idle_sched_class.pick_task(rq_i);
6209  		}
6210  
6211  		rq_i->core_pick = p;
6212  
6213  		if (p == rq_i->idle) {
6214  			if (rq_i->nr_running) {
6215  				rq->core->core_forceidle_count++;
6216  				if (!fi_before)
6217  					rq->core->core_forceidle_seq++;
6218  			}
6219  		} else {
6220  			occ++;
6221  		}
6222  	}
6223  
6224  	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6225  		rq->core->core_forceidle_start = rq_clock(rq->core);
6226  		rq->core->core_forceidle_occupation = occ;
6227  	}
6228  
6229  	rq->core->core_pick_seq = rq->core->core_task_seq;
6230  	next = rq->core_pick;
6231  	rq->core_sched_seq = rq->core->core_pick_seq;
6232  
6233  	/* Something should have been selected for current CPU */
6234  	WARN_ON_ONCE(!next);
6235  
6236  	/*
6237  	 * Reschedule siblings
6238  	 *
6239  	 * NOTE: L1TF -- at this point we're no longer running the old task and
6240  	 * sending an IPI (below) ensures the sibling will no longer be running
6241  	 * their task. This ensures there is no inter-sibling overlap between
6242  	 * non-matching user state.
6243  	 */
6244  	for_each_cpu(i, smt_mask) {
6245  		rq_i = cpu_rq(i);
6246  
6247  		/*
6248  		 * An online sibling might have gone offline before a task
6249  		 * could be picked for it, or it might be offline but later
6250  		 * happen to come online, but its too late and nothing was
6251  		 * picked for it.  That's Ok - it will pick tasks for itself,
6252  		 * so ignore it.
6253  		 */
6254  		if (!rq_i->core_pick)
6255  			continue;
6256  
6257  		/*
6258  		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6259  		 * fi_before     fi      update?
6260  		 *  0            0       1
6261  		 *  0            1       1
6262  		 *  1            0       1
6263  		 *  1            1       0
6264  		 */
6265  		if (!(fi_before && rq->core->core_forceidle_count))
6266  			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6267  
6268  		rq_i->core_pick->core_occupation = occ;
6269  
6270  		if (i == cpu) {
6271  			rq_i->core_pick = NULL;
6272  			continue;
6273  		}
6274  
6275  		/* Did we break L1TF mitigation requirements? */
6276  		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6277  
6278  		if (rq_i->curr == rq_i->core_pick) {
6279  			rq_i->core_pick = NULL;
6280  			continue;
6281  		}
6282  
6283  		resched_curr(rq_i);
6284  	}
6285  
6286  out_set_next:
6287  	set_next_task(rq, next);
6288  out:
6289  	if (rq->core->core_forceidle_count && next == rq->idle)
6290  		queue_core_balance(rq);
6291  
6292  	return next;
6293  }
6294  
try_steal_cookie(int this,int that)6295  static bool try_steal_cookie(int this, int that)
6296  {
6297  	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6298  	struct task_struct *p;
6299  	unsigned long cookie;
6300  	bool success = false;
6301  
6302  	guard(irq)();
6303  	guard(double_rq_lock)(dst, src);
6304  
6305  	cookie = dst->core->core_cookie;
6306  	if (!cookie)
6307  		return false;
6308  
6309  	if (dst->curr != dst->idle)
6310  		return false;
6311  
6312  	p = sched_core_find(src, cookie);
6313  	if (!p)
6314  		return false;
6315  
6316  	do {
6317  		if (p == src->core_pick || p == src->curr)
6318  			goto next;
6319  
6320  		if (!is_cpu_allowed(p, this))
6321  			goto next;
6322  
6323  		if (p->core_occupation > dst->idle->core_occupation)
6324  			goto next;
6325  		/*
6326  		 * sched_core_find() and sched_core_next() will ensure
6327  		 * that task @p is not throttled now, we also need to
6328  		 * check whether the runqueue of the destination CPU is
6329  		 * being throttled.
6330  		 */
6331  		if (sched_task_is_throttled(p, this))
6332  			goto next;
6333  
6334  		deactivate_task(src, p, 0);
6335  		set_task_cpu(p, this);
6336  		activate_task(dst, p, 0);
6337  
6338  		resched_curr(dst);
6339  
6340  		success = true;
6341  		break;
6342  
6343  next:
6344  		p = sched_core_next(p, cookie);
6345  	} while (p);
6346  
6347  	return success;
6348  }
6349  
steal_cookie_task(int cpu,struct sched_domain * sd)6350  static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6351  {
6352  	int i;
6353  
6354  	for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6355  		if (i == cpu)
6356  			continue;
6357  
6358  		if (need_resched())
6359  			break;
6360  
6361  		if (try_steal_cookie(cpu, i))
6362  			return true;
6363  	}
6364  
6365  	return false;
6366  }
6367  
sched_core_balance(struct rq * rq)6368  static void sched_core_balance(struct rq *rq)
6369  {
6370  	struct sched_domain *sd;
6371  	int cpu = cpu_of(rq);
6372  
6373  	preempt_disable();
6374  	rcu_read_lock();
6375  	raw_spin_rq_unlock_irq(rq);
6376  	for_each_domain(cpu, sd) {
6377  		if (need_resched())
6378  			break;
6379  
6380  		if (steal_cookie_task(cpu, sd))
6381  			break;
6382  	}
6383  	raw_spin_rq_lock_irq(rq);
6384  	rcu_read_unlock();
6385  	preempt_enable();
6386  }
6387  
6388  static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6389  
queue_core_balance(struct rq * rq)6390  static void queue_core_balance(struct rq *rq)
6391  {
6392  	if (!sched_core_enabled(rq))
6393  		return;
6394  
6395  	if (!rq->core->core_cookie)
6396  		return;
6397  
6398  	if (!rq->nr_running) /* not forced idle */
6399  		return;
6400  
6401  	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6402  }
6403  
6404  DEFINE_LOCK_GUARD_1(core_lock, int,
6405  		    sched_core_lock(*_T->lock, &_T->flags),
6406  		    sched_core_unlock(*_T->lock, &_T->flags),
6407  		    unsigned long flags)
6408  
sched_core_cpu_starting(unsigned int cpu)6409  static void sched_core_cpu_starting(unsigned int cpu)
6410  {
6411  	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6412  	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6413  	int t;
6414  
6415  	guard(core_lock)(&cpu);
6416  
6417  	WARN_ON_ONCE(rq->core != rq);
6418  
6419  	/* if we're the first, we'll be our own leader */
6420  	if (cpumask_weight(smt_mask) == 1)
6421  		return;
6422  
6423  	/* find the leader */
6424  	for_each_cpu(t, smt_mask) {
6425  		if (t == cpu)
6426  			continue;
6427  		rq = cpu_rq(t);
6428  		if (rq->core == rq) {
6429  			core_rq = rq;
6430  			break;
6431  		}
6432  	}
6433  
6434  	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6435  		return;
6436  
6437  	/* install and validate core_rq */
6438  	for_each_cpu(t, smt_mask) {
6439  		rq = cpu_rq(t);
6440  
6441  		if (t == cpu)
6442  			rq->core = core_rq;
6443  
6444  		WARN_ON_ONCE(rq->core != core_rq);
6445  	}
6446  }
6447  
sched_core_cpu_deactivate(unsigned int cpu)6448  static void sched_core_cpu_deactivate(unsigned int cpu)
6449  {
6450  	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6451  	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6452  	int t;
6453  
6454  	guard(core_lock)(&cpu);
6455  
6456  	/* if we're the last man standing, nothing to do */
6457  	if (cpumask_weight(smt_mask) == 1) {
6458  		WARN_ON_ONCE(rq->core != rq);
6459  		return;
6460  	}
6461  
6462  	/* if we're not the leader, nothing to do */
6463  	if (rq->core != rq)
6464  		return;
6465  
6466  	/* find a new leader */
6467  	for_each_cpu(t, smt_mask) {
6468  		if (t == cpu)
6469  			continue;
6470  		core_rq = cpu_rq(t);
6471  		break;
6472  	}
6473  
6474  	if (WARN_ON_ONCE(!core_rq)) /* impossible */
6475  		return;
6476  
6477  	/* copy the shared state to the new leader */
6478  	core_rq->core_task_seq             = rq->core_task_seq;
6479  	core_rq->core_pick_seq             = rq->core_pick_seq;
6480  	core_rq->core_cookie               = rq->core_cookie;
6481  	core_rq->core_forceidle_count      = rq->core_forceidle_count;
6482  	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
6483  	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6484  
6485  	/*
6486  	 * Accounting edge for forced idle is handled in pick_next_task().
6487  	 * Don't need another one here, since the hotplug thread shouldn't
6488  	 * have a cookie.
6489  	 */
6490  	core_rq->core_forceidle_start = 0;
6491  
6492  	/* install new leader */
6493  	for_each_cpu(t, smt_mask) {
6494  		rq = cpu_rq(t);
6495  		rq->core = core_rq;
6496  	}
6497  }
6498  
sched_core_cpu_dying(unsigned int cpu)6499  static inline void sched_core_cpu_dying(unsigned int cpu)
6500  {
6501  	struct rq *rq = cpu_rq(cpu);
6502  
6503  	if (rq->core != rq)
6504  		rq->core = rq;
6505  }
6506  
6507  #else /* !CONFIG_SCHED_CORE */
6508  
sched_core_cpu_starting(unsigned int cpu)6509  static inline void sched_core_cpu_starting(unsigned int cpu) {}
sched_core_cpu_deactivate(unsigned int cpu)6510  static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
sched_core_cpu_dying(unsigned int cpu)6511  static inline void sched_core_cpu_dying(unsigned int cpu) {}
6512  
6513  static struct task_struct *
pick_next_task(struct rq * rq,struct task_struct * prev,struct rq_flags * rf)6514  pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6515  {
6516  	return __pick_next_task(rq, prev, rf);
6517  }
6518  
6519  #endif /* CONFIG_SCHED_CORE */
6520  
6521  /*
6522   * Constants for the sched_mode argument of __schedule().
6523   *
6524   * The mode argument allows RT enabled kernels to differentiate a
6525   * preemption from blocking on an 'sleeping' spin/rwlock. Note that
6526   * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
6527   * optimize the AND operation out and just check for zero.
6528   */
6529  #define SM_NONE			0x0
6530  #define SM_PREEMPT		0x1
6531  #define SM_RTLOCK_WAIT		0x2
6532  
6533  #ifndef CONFIG_PREEMPT_RT
6534  # define SM_MASK_PREEMPT	(~0U)
6535  #else
6536  # define SM_MASK_PREEMPT	SM_PREEMPT
6537  #endif
6538  
6539  /*
6540   * __schedule() is the main scheduler function.
6541   *
6542   * The main means of driving the scheduler and thus entering this function are:
6543   *
6544   *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6545   *
6546   *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6547   *      paths. For example, see arch/x86/entry_64.S.
6548   *
6549   *      To drive preemption between tasks, the scheduler sets the flag in timer
6550   *      interrupt handler scheduler_tick().
6551   *
6552   *   3. Wakeups don't really cause entry into schedule(). They add a
6553   *      task to the run-queue and that's it.
6554   *
6555   *      Now, if the new task added to the run-queue preempts the current
6556   *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6557   *      called on the nearest possible occasion:
6558   *
6559   *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6560   *
6561   *         - in syscall or exception context, at the next outmost
6562   *           preempt_enable(). (this might be as soon as the wake_up()'s
6563   *           spin_unlock()!)
6564   *
6565   *         - in IRQ context, return from interrupt-handler to
6566   *           preemptible context
6567   *
6568   *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6569   *         then at the next:
6570   *
6571   *          - cond_resched() call
6572   *          - explicit schedule() call
6573   *          - return from syscall or exception to user-space
6574   *          - return from interrupt-handler to user-space
6575   *
6576   * WARNING: must be called with preemption disabled!
6577   */
__schedule(unsigned int sched_mode)6578  static void __sched notrace __schedule(unsigned int sched_mode)
6579  {
6580  	struct task_struct *prev, *next;
6581  	unsigned long *switch_count;
6582  	unsigned long prev_state;
6583  	struct rq_flags rf;
6584  	struct rq *rq;
6585  	int cpu;
6586  
6587  	cpu = smp_processor_id();
6588  	rq = cpu_rq(cpu);
6589  	prev = rq->curr;
6590  
6591  	schedule_debug(prev, !!sched_mode);
6592  
6593  	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6594  		hrtick_clear(rq);
6595  
6596  	local_irq_disable();
6597  	rcu_note_context_switch(!!sched_mode);
6598  
6599  	/*
6600  	 * Make sure that signal_pending_state()->signal_pending() below
6601  	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6602  	 * done by the caller to avoid the race with signal_wake_up():
6603  	 *
6604  	 * __set_current_state(@state)		signal_wake_up()
6605  	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
6606  	 *					  wake_up_state(p, state)
6607  	 *   LOCK rq->lock			    LOCK p->pi_state
6608  	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
6609  	 *     if (signal_pending_state())	    if (p->state & @state)
6610  	 *
6611  	 * Also, the membarrier system call requires a full memory barrier
6612  	 * after coming from user-space, before storing to rq->curr.
6613  	 */
6614  	rq_lock(rq, &rf);
6615  	smp_mb__after_spinlock();
6616  
6617  	/* Promote REQ to ACT */
6618  	rq->clock_update_flags <<= 1;
6619  	update_rq_clock(rq);
6620  	rq->clock_update_flags = RQCF_UPDATED;
6621  
6622  	switch_count = &prev->nivcsw;
6623  
6624  	/*
6625  	 * We must load prev->state once (task_struct::state is volatile), such
6626  	 * that we form a control dependency vs deactivate_task() below.
6627  	 */
6628  	prev_state = READ_ONCE(prev->__state);
6629  	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
6630  		if (signal_pending_state(prev_state, prev)) {
6631  			WRITE_ONCE(prev->__state, TASK_RUNNING);
6632  		} else {
6633  			prev->sched_contributes_to_load =
6634  				(prev_state & TASK_UNINTERRUPTIBLE) &&
6635  				!(prev_state & TASK_NOLOAD) &&
6636  				!(prev_state & TASK_FROZEN);
6637  
6638  			if (prev->sched_contributes_to_load)
6639  				rq->nr_uninterruptible++;
6640  
6641  			/*
6642  			 * __schedule()			ttwu()
6643  			 *   prev_state = prev->state;    if (p->on_rq && ...)
6644  			 *   if (prev_state)		    goto out;
6645  			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
6646  			 *				  p->state = TASK_WAKING
6647  			 *
6648  			 * Where __schedule() and ttwu() have matching control dependencies.
6649  			 *
6650  			 * After this, schedule() must not care about p->state any more.
6651  			 */
6652  			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
6653  
6654  			if (prev->in_iowait) {
6655  				atomic_inc(&rq->nr_iowait);
6656  				delayacct_blkio_start();
6657  			}
6658  		}
6659  		switch_count = &prev->nvcsw;
6660  	}
6661  
6662  	next = pick_next_task(rq, prev, &rf);
6663  	clear_tsk_need_resched(prev);
6664  	clear_preempt_need_resched();
6665  #ifdef CONFIG_SCHED_DEBUG
6666  	rq->last_seen_need_resched_ns = 0;
6667  #endif
6668  
6669  	if (likely(prev != next)) {
6670  		rq->nr_switches++;
6671  		/*
6672  		 * RCU users of rcu_dereference(rq->curr) may not see
6673  		 * changes to task_struct made by pick_next_task().
6674  		 */
6675  		RCU_INIT_POINTER(rq->curr, next);
6676  		/*
6677  		 * The membarrier system call requires each architecture
6678  		 * to have a full memory barrier after updating
6679  		 * rq->curr, before returning to user-space.
6680  		 *
6681  		 * Here are the schemes providing that barrier on the
6682  		 * various architectures:
6683  		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6684  		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
6685  		 *   on PowerPC and on RISC-V.
6686  		 * - finish_lock_switch() for weakly-ordered
6687  		 *   architectures where spin_unlock is a full barrier,
6688  		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6689  		 *   is a RELEASE barrier),
6690  		 */
6691  		++*switch_count;
6692  
6693  		migrate_disable_switch(rq, prev);
6694  		psi_account_irqtime(rq, prev, next);
6695  		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6696  
6697  		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
6698  
6699  		/* Also unlocks the rq: */
6700  		rq = context_switch(rq, prev, next, &rf);
6701  	} else {
6702  		rq_unpin_lock(rq, &rf);
6703  		__balance_callbacks(rq);
6704  		raw_spin_rq_unlock_irq(rq);
6705  	}
6706  }
6707  
do_task_dead(void)6708  void __noreturn do_task_dead(void)
6709  {
6710  	/* Causes final put_task_struct in finish_task_switch(): */
6711  	set_special_state(TASK_DEAD);
6712  
6713  	/* Tell freezer to ignore us: */
6714  	current->flags |= PF_NOFREEZE;
6715  
6716  	__schedule(SM_NONE);
6717  	BUG();
6718  
6719  	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6720  	for (;;)
6721  		cpu_relax();
6722  }
6723  
sched_submit_work(struct task_struct * tsk)6724  static inline void sched_submit_work(struct task_struct *tsk)
6725  {
6726  	unsigned int task_flags;
6727  
6728  	if (task_is_running(tsk))
6729  		return;
6730  
6731  	task_flags = tsk->flags;
6732  	/*
6733  	 * If a worker goes to sleep, notify and ask workqueue whether it
6734  	 * wants to wake up a task to maintain concurrency.
6735  	 */
6736  	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6737  		if (task_flags & PF_WQ_WORKER)
6738  			wq_worker_sleeping(tsk);
6739  		else
6740  			io_wq_worker_sleeping(tsk);
6741  	}
6742  
6743  	/*
6744  	 * spinlock and rwlock must not flush block requests.  This will
6745  	 * deadlock if the callback attempts to acquire a lock which is
6746  	 * already acquired.
6747  	 */
6748  	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6749  
6750  	/*
6751  	 * If we are going to sleep and we have plugged IO queued,
6752  	 * make sure to submit it to avoid deadlocks.
6753  	 */
6754  	blk_flush_plug(tsk->plug, true);
6755  }
6756  
sched_update_worker(struct task_struct * tsk)6757  static void sched_update_worker(struct task_struct *tsk)
6758  {
6759  	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6760  		if (tsk->flags & PF_WQ_WORKER)
6761  			wq_worker_running(tsk);
6762  		else
6763  			io_wq_worker_running(tsk);
6764  	}
6765  }
6766  
schedule(void)6767  asmlinkage __visible void __sched schedule(void)
6768  {
6769  	struct task_struct *tsk = current;
6770  
6771  	sched_submit_work(tsk);
6772  	do {
6773  		preempt_disable();
6774  		__schedule(SM_NONE);
6775  		sched_preempt_enable_no_resched();
6776  	} while (need_resched());
6777  	sched_update_worker(tsk);
6778  }
6779  EXPORT_SYMBOL(schedule);
6780  
6781  /*
6782   * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6783   * state (have scheduled out non-voluntarily) by making sure that all
6784   * tasks have either left the run queue or have gone into user space.
6785   * As idle tasks do not do either, they must not ever be preempted
6786   * (schedule out non-voluntarily).
6787   *
6788   * schedule_idle() is similar to schedule_preempt_disable() except that it
6789   * never enables preemption because it does not call sched_submit_work().
6790   */
schedule_idle(void)6791  void __sched schedule_idle(void)
6792  {
6793  	/*
6794  	 * As this skips calling sched_submit_work(), which the idle task does
6795  	 * regardless because that function is a nop when the task is in a
6796  	 * TASK_RUNNING state, make sure this isn't used someplace that the
6797  	 * current task can be in any other state. Note, idle is always in the
6798  	 * TASK_RUNNING state.
6799  	 */
6800  	WARN_ON_ONCE(current->__state);
6801  	do {
6802  		__schedule(SM_NONE);
6803  	} while (need_resched());
6804  }
6805  
6806  #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
schedule_user(void)6807  asmlinkage __visible void __sched schedule_user(void)
6808  {
6809  	/*
6810  	 * If we come here after a random call to set_need_resched(),
6811  	 * or we have been woken up remotely but the IPI has not yet arrived,
6812  	 * we haven't yet exited the RCU idle mode. Do it here manually until
6813  	 * we find a better solution.
6814  	 *
6815  	 * NB: There are buggy callers of this function.  Ideally we
6816  	 * should warn if prev_state != CONTEXT_USER, but that will trigger
6817  	 * too frequently to make sense yet.
6818  	 */
6819  	enum ctx_state prev_state = exception_enter();
6820  	schedule();
6821  	exception_exit(prev_state);
6822  }
6823  #endif
6824  
6825  /**
6826   * schedule_preempt_disabled - called with preemption disabled
6827   *
6828   * Returns with preemption disabled. Note: preempt_count must be 1
6829   */
schedule_preempt_disabled(void)6830  void __sched schedule_preempt_disabled(void)
6831  {
6832  	sched_preempt_enable_no_resched();
6833  	schedule();
6834  	preempt_disable();
6835  }
6836  
6837  #ifdef CONFIG_PREEMPT_RT
schedule_rtlock(void)6838  void __sched notrace schedule_rtlock(void)
6839  {
6840  	do {
6841  		preempt_disable();
6842  		__schedule(SM_RTLOCK_WAIT);
6843  		sched_preempt_enable_no_resched();
6844  	} while (need_resched());
6845  }
6846  NOKPROBE_SYMBOL(schedule_rtlock);
6847  #endif
6848  
preempt_schedule_common(void)6849  static void __sched notrace preempt_schedule_common(void)
6850  {
6851  	do {
6852  		/*
6853  		 * Because the function tracer can trace preempt_count_sub()
6854  		 * and it also uses preempt_enable/disable_notrace(), if
6855  		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6856  		 * by the function tracer will call this function again and
6857  		 * cause infinite recursion.
6858  		 *
6859  		 * Preemption must be disabled here before the function
6860  		 * tracer can trace. Break up preempt_disable() into two
6861  		 * calls. One to disable preemption without fear of being
6862  		 * traced. The other to still record the preemption latency,
6863  		 * which can also be traced by the function tracer.
6864  		 */
6865  		preempt_disable_notrace();
6866  		preempt_latency_start(1);
6867  		__schedule(SM_PREEMPT);
6868  		preempt_latency_stop(1);
6869  		preempt_enable_no_resched_notrace();
6870  
6871  		/*
6872  		 * Check again in case we missed a preemption opportunity
6873  		 * between schedule and now.
6874  		 */
6875  	} while (need_resched());
6876  }
6877  
6878  #ifdef CONFIG_PREEMPTION
6879  /*
6880   * This is the entry point to schedule() from in-kernel preemption
6881   * off of preempt_enable.
6882   */
preempt_schedule(void)6883  asmlinkage __visible void __sched notrace preempt_schedule(void)
6884  {
6885  	/*
6886  	 * If there is a non-zero preempt_count or interrupts are disabled,
6887  	 * we do not want to preempt the current task. Just return..
6888  	 */
6889  	if (likely(!preemptible()))
6890  		return;
6891  	preempt_schedule_common();
6892  }
6893  NOKPROBE_SYMBOL(preempt_schedule);
6894  EXPORT_SYMBOL(preempt_schedule);
6895  
6896  #ifdef CONFIG_PREEMPT_DYNAMIC
6897  #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6898  #ifndef preempt_schedule_dynamic_enabled
6899  #define preempt_schedule_dynamic_enabled	preempt_schedule
6900  #define preempt_schedule_dynamic_disabled	NULL
6901  #endif
6902  DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6903  EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6904  #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6905  static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
dynamic_preempt_schedule(void)6906  void __sched notrace dynamic_preempt_schedule(void)
6907  {
6908  	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6909  		return;
6910  	preempt_schedule();
6911  }
6912  NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6913  EXPORT_SYMBOL(dynamic_preempt_schedule);
6914  #endif
6915  #endif
6916  
6917  /**
6918   * preempt_schedule_notrace - preempt_schedule called by tracing
6919   *
6920   * The tracing infrastructure uses preempt_enable_notrace to prevent
6921   * recursion and tracing preempt enabling caused by the tracing
6922   * infrastructure itself. But as tracing can happen in areas coming
6923   * from userspace or just about to enter userspace, a preempt enable
6924   * can occur before user_exit() is called. This will cause the scheduler
6925   * to be called when the system is still in usermode.
6926   *
6927   * To prevent this, the preempt_enable_notrace will use this function
6928   * instead of preempt_schedule() to exit user context if needed before
6929   * calling the scheduler.
6930   */
preempt_schedule_notrace(void)6931  asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6932  {
6933  	enum ctx_state prev_ctx;
6934  
6935  	if (likely(!preemptible()))
6936  		return;
6937  
6938  	do {
6939  		/*
6940  		 * Because the function tracer can trace preempt_count_sub()
6941  		 * and it also uses preempt_enable/disable_notrace(), if
6942  		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6943  		 * by the function tracer will call this function again and
6944  		 * cause infinite recursion.
6945  		 *
6946  		 * Preemption must be disabled here before the function
6947  		 * tracer can trace. Break up preempt_disable() into two
6948  		 * calls. One to disable preemption without fear of being
6949  		 * traced. The other to still record the preemption latency,
6950  		 * which can also be traced by the function tracer.
6951  		 */
6952  		preempt_disable_notrace();
6953  		preempt_latency_start(1);
6954  		/*
6955  		 * Needs preempt disabled in case user_exit() is traced
6956  		 * and the tracer calls preempt_enable_notrace() causing
6957  		 * an infinite recursion.
6958  		 */
6959  		prev_ctx = exception_enter();
6960  		__schedule(SM_PREEMPT);
6961  		exception_exit(prev_ctx);
6962  
6963  		preempt_latency_stop(1);
6964  		preempt_enable_no_resched_notrace();
6965  	} while (need_resched());
6966  }
6967  EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6968  
6969  #ifdef CONFIG_PREEMPT_DYNAMIC
6970  #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6971  #ifndef preempt_schedule_notrace_dynamic_enabled
6972  #define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
6973  #define preempt_schedule_notrace_dynamic_disabled	NULL
6974  #endif
6975  DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6976  EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6977  #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6978  static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
dynamic_preempt_schedule_notrace(void)6979  void __sched notrace dynamic_preempt_schedule_notrace(void)
6980  {
6981  	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6982  		return;
6983  	preempt_schedule_notrace();
6984  }
6985  NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6986  EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6987  #endif
6988  #endif
6989  
6990  #endif /* CONFIG_PREEMPTION */
6991  
6992  /*
6993   * This is the entry point to schedule() from kernel preemption
6994   * off of irq context.
6995   * Note, that this is called and return with irqs disabled. This will
6996   * protect us against recursive calling from irq.
6997   */
preempt_schedule_irq(void)6998  asmlinkage __visible void __sched preempt_schedule_irq(void)
6999  {
7000  	enum ctx_state prev_state;
7001  
7002  	/* Catch callers which need to be fixed */
7003  	BUG_ON(preempt_count() || !irqs_disabled());
7004  
7005  	prev_state = exception_enter();
7006  
7007  	do {
7008  		preempt_disable();
7009  		local_irq_enable();
7010  		__schedule(SM_PREEMPT);
7011  		local_irq_disable();
7012  		sched_preempt_enable_no_resched();
7013  	} while (need_resched());
7014  
7015  	exception_exit(prev_state);
7016  }
7017  
default_wake_function(wait_queue_entry_t * curr,unsigned mode,int wake_flags,void * key)7018  int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7019  			  void *key)
7020  {
7021  	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7022  	return try_to_wake_up(curr->private, mode, wake_flags);
7023  }
7024  EXPORT_SYMBOL(default_wake_function);
7025  
__setscheduler_prio(struct task_struct * p,int prio)7026  static void __setscheduler_prio(struct task_struct *p, int prio)
7027  {
7028  	if (dl_prio(prio))
7029  		p->sched_class = &dl_sched_class;
7030  	else if (rt_prio(prio))
7031  		p->sched_class = &rt_sched_class;
7032  	else
7033  		p->sched_class = &fair_sched_class;
7034  
7035  	p->prio = prio;
7036  }
7037  
7038  #ifdef CONFIG_RT_MUTEXES
7039  
__rt_effective_prio(struct task_struct * pi_task,int prio)7040  static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
7041  {
7042  	if (pi_task)
7043  		prio = min(prio, pi_task->prio);
7044  
7045  	return prio;
7046  }
7047  
rt_effective_prio(struct task_struct * p,int prio)7048  static inline int rt_effective_prio(struct task_struct *p, int prio)
7049  {
7050  	struct task_struct *pi_task = rt_mutex_get_top_task(p);
7051  
7052  	return __rt_effective_prio(pi_task, prio);
7053  }
7054  
7055  /*
7056   * rt_mutex_setprio - set the current priority of a task
7057   * @p: task to boost
7058   * @pi_task: donor task
7059   *
7060   * This function changes the 'effective' priority of a task. It does
7061   * not touch ->normal_prio like __setscheduler().
7062   *
7063   * Used by the rt_mutex code to implement priority inheritance
7064   * logic. Call site only calls if the priority of the task changed.
7065   */
rt_mutex_setprio(struct task_struct * p,struct task_struct * pi_task)7066  void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7067  {
7068  	int prio, oldprio, queued, running, queue_flag =
7069  		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7070  	const struct sched_class *prev_class;
7071  	struct rq_flags rf;
7072  	struct rq *rq;
7073  
7074  	/* XXX used to be waiter->prio, not waiter->task->prio */
7075  	prio = __rt_effective_prio(pi_task, p->normal_prio);
7076  
7077  	/*
7078  	 * If nothing changed; bail early.
7079  	 */
7080  	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7081  		return;
7082  
7083  	rq = __task_rq_lock(p, &rf);
7084  	update_rq_clock(rq);
7085  	/*
7086  	 * Set under pi_lock && rq->lock, such that the value can be used under
7087  	 * either lock.
7088  	 *
7089  	 * Note that there is loads of tricky to make this pointer cache work
7090  	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7091  	 * ensure a task is de-boosted (pi_task is set to NULL) before the
7092  	 * task is allowed to run again (and can exit). This ensures the pointer
7093  	 * points to a blocked task -- which guarantees the task is present.
7094  	 */
7095  	p->pi_top_task = pi_task;
7096  
7097  	/*
7098  	 * For FIFO/RR we only need to set prio, if that matches we're done.
7099  	 */
7100  	if (prio == p->prio && !dl_prio(prio))
7101  		goto out_unlock;
7102  
7103  	/*
7104  	 * Idle task boosting is a nono in general. There is one
7105  	 * exception, when PREEMPT_RT and NOHZ is active:
7106  	 *
7107  	 * The idle task calls get_next_timer_interrupt() and holds
7108  	 * the timer wheel base->lock on the CPU and another CPU wants
7109  	 * to access the timer (probably to cancel it). We can safely
7110  	 * ignore the boosting request, as the idle CPU runs this code
7111  	 * with interrupts disabled and will complete the lock
7112  	 * protected section without being interrupted. So there is no
7113  	 * real need to boost.
7114  	 */
7115  	if (unlikely(p == rq->idle)) {
7116  		WARN_ON(p != rq->curr);
7117  		WARN_ON(p->pi_blocked_on);
7118  		goto out_unlock;
7119  	}
7120  
7121  	trace_sched_pi_setprio(p, pi_task);
7122  	oldprio = p->prio;
7123  
7124  	if (oldprio == prio)
7125  		queue_flag &= ~DEQUEUE_MOVE;
7126  
7127  	prev_class = p->sched_class;
7128  	queued = task_on_rq_queued(p);
7129  	running = task_current(rq, p);
7130  	if (queued)
7131  		dequeue_task(rq, p, queue_flag);
7132  	if (running)
7133  		put_prev_task(rq, p);
7134  
7135  	/*
7136  	 * Boosting condition are:
7137  	 * 1. -rt task is running and holds mutex A
7138  	 *      --> -dl task blocks on mutex A
7139  	 *
7140  	 * 2. -dl task is running and holds mutex A
7141  	 *      --> -dl task blocks on mutex A and could preempt the
7142  	 *          running task
7143  	 */
7144  	if (dl_prio(prio)) {
7145  		if (!dl_prio(p->normal_prio) ||
7146  		    (pi_task && dl_prio(pi_task->prio) &&
7147  		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7148  			p->dl.pi_se = pi_task->dl.pi_se;
7149  			queue_flag |= ENQUEUE_REPLENISH;
7150  		} else {
7151  			p->dl.pi_se = &p->dl;
7152  		}
7153  	} else if (rt_prio(prio)) {
7154  		if (dl_prio(oldprio))
7155  			p->dl.pi_se = &p->dl;
7156  		if (oldprio < prio)
7157  			queue_flag |= ENQUEUE_HEAD;
7158  	} else {
7159  		if (dl_prio(oldprio))
7160  			p->dl.pi_se = &p->dl;
7161  		if (rt_prio(oldprio))
7162  			p->rt.timeout = 0;
7163  	}
7164  
7165  	__setscheduler_prio(p, prio);
7166  
7167  	if (queued)
7168  		enqueue_task(rq, p, queue_flag);
7169  	if (running)
7170  		set_next_task(rq, p);
7171  
7172  	check_class_changed(rq, p, prev_class, oldprio);
7173  out_unlock:
7174  	/* Avoid rq from going away on us: */
7175  	preempt_disable();
7176  
7177  	rq_unpin_lock(rq, &rf);
7178  	__balance_callbacks(rq);
7179  	raw_spin_rq_unlock(rq);
7180  
7181  	preempt_enable();
7182  }
7183  #else
rt_effective_prio(struct task_struct * p,int prio)7184  static inline int rt_effective_prio(struct task_struct *p, int prio)
7185  {
7186  	return prio;
7187  }
7188  #endif
7189  
set_user_nice(struct task_struct * p,long nice)7190  void set_user_nice(struct task_struct *p, long nice)
7191  {
7192  	bool queued, running;
7193  	int old_prio;
7194  	struct rq_flags rf;
7195  	struct rq *rq;
7196  
7197  	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
7198  		return;
7199  	/*
7200  	 * We have to be careful, if called from sys_setpriority(),
7201  	 * the task might be in the middle of scheduling on another CPU.
7202  	 */
7203  	rq = task_rq_lock(p, &rf);
7204  	update_rq_clock(rq);
7205  
7206  	/*
7207  	 * The RT priorities are set via sched_setscheduler(), but we still
7208  	 * allow the 'normal' nice value to be set - but as expected
7209  	 * it won't have any effect on scheduling until the task is
7210  	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
7211  	 */
7212  	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
7213  		p->static_prio = NICE_TO_PRIO(nice);
7214  		goto out_unlock;
7215  	}
7216  	queued = task_on_rq_queued(p);
7217  	running = task_current(rq, p);
7218  	if (queued)
7219  		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
7220  	if (running)
7221  		put_prev_task(rq, p);
7222  
7223  	p->static_prio = NICE_TO_PRIO(nice);
7224  	set_load_weight(p, true);
7225  	old_prio = p->prio;
7226  	p->prio = effective_prio(p);
7227  
7228  	if (queued)
7229  		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7230  	if (running)
7231  		set_next_task(rq, p);
7232  
7233  	/*
7234  	 * If the task increased its priority or is running and
7235  	 * lowered its priority, then reschedule its CPU:
7236  	 */
7237  	p->sched_class->prio_changed(rq, p, old_prio);
7238  
7239  out_unlock:
7240  	task_rq_unlock(rq, p, &rf);
7241  }
7242  EXPORT_SYMBOL(set_user_nice);
7243  
7244  /*
7245   * is_nice_reduction - check if nice value is an actual reduction
7246   *
7247   * Similar to can_nice() but does not perform a capability check.
7248   *
7249   * @p: task
7250   * @nice: nice value
7251   */
is_nice_reduction(const struct task_struct * p,const int nice)7252  static bool is_nice_reduction(const struct task_struct *p, const int nice)
7253  {
7254  	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
7255  	int nice_rlim = nice_to_rlimit(nice);
7256  
7257  	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
7258  }
7259  
7260  /*
7261   * can_nice - check if a task can reduce its nice value
7262   * @p: task
7263   * @nice: nice value
7264   */
can_nice(const struct task_struct * p,const int nice)7265  int can_nice(const struct task_struct *p, const int nice)
7266  {
7267  	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
7268  }
7269  
7270  #ifdef __ARCH_WANT_SYS_NICE
7271  
7272  /*
7273   * sys_nice - change the priority of the current process.
7274   * @increment: priority increment
7275   *
7276   * sys_setpriority is a more generic, but much slower function that
7277   * does similar things.
7278   */
SYSCALL_DEFINE1(nice,int,increment)7279  SYSCALL_DEFINE1(nice, int, increment)
7280  {
7281  	long nice, retval;
7282  
7283  	/*
7284  	 * Setpriority might change our priority at the same moment.
7285  	 * We don't have to worry. Conceptually one call occurs first
7286  	 * and we have a single winner.
7287  	 */
7288  	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
7289  	nice = task_nice(current) + increment;
7290  
7291  	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
7292  	if (increment < 0 && !can_nice(current, nice))
7293  		return -EPERM;
7294  
7295  	retval = security_task_setnice(current, nice);
7296  	if (retval)
7297  		return retval;
7298  
7299  	set_user_nice(current, nice);
7300  	return 0;
7301  }
7302  
7303  #endif
7304  
7305  /**
7306   * task_prio - return the priority value of a given task.
7307   * @p: the task in question.
7308   *
7309   * Return: The priority value as seen by users in /proc.
7310   *
7311   * sched policy         return value   kernel prio    user prio/nice
7312   *
7313   * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
7314   * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
7315   * deadline                     -101             -1           0
7316   */
task_prio(const struct task_struct * p)7317  int task_prio(const struct task_struct *p)
7318  {
7319  	return p->prio - MAX_RT_PRIO;
7320  }
7321  
7322  /**
7323   * idle_cpu - is a given CPU idle currently?
7324   * @cpu: the processor in question.
7325   *
7326   * Return: 1 if the CPU is currently idle. 0 otherwise.
7327   */
idle_cpu(int cpu)7328  int idle_cpu(int cpu)
7329  {
7330  	struct rq *rq = cpu_rq(cpu);
7331  
7332  	if (rq->curr != rq->idle)
7333  		return 0;
7334  
7335  	if (rq->nr_running)
7336  		return 0;
7337  
7338  #ifdef CONFIG_SMP
7339  	if (rq->ttwu_pending)
7340  		return 0;
7341  #endif
7342  
7343  	return 1;
7344  }
7345  
7346  /**
7347   * available_idle_cpu - is a given CPU idle for enqueuing work.
7348   * @cpu: the CPU in question.
7349   *
7350   * Return: 1 if the CPU is currently idle. 0 otherwise.
7351   */
available_idle_cpu(int cpu)7352  int available_idle_cpu(int cpu)
7353  {
7354  	if (!idle_cpu(cpu))
7355  		return 0;
7356  
7357  	if (vcpu_is_preempted(cpu))
7358  		return 0;
7359  
7360  	return 1;
7361  }
7362  
7363  /**
7364   * idle_task - return the idle task for a given CPU.
7365   * @cpu: the processor in question.
7366   *
7367   * Return: The idle task for the CPU @cpu.
7368   */
idle_task(int cpu)7369  struct task_struct *idle_task(int cpu)
7370  {
7371  	return cpu_rq(cpu)->idle;
7372  }
7373  
7374  #ifdef CONFIG_SCHED_CORE
sched_core_idle_cpu(int cpu)7375  int sched_core_idle_cpu(int cpu)
7376  {
7377  	struct rq *rq = cpu_rq(cpu);
7378  
7379  	if (sched_core_enabled(rq) && rq->curr == rq->idle)
7380  		return 1;
7381  
7382  	return idle_cpu(cpu);
7383  }
7384  
7385  #endif
7386  
7387  #ifdef CONFIG_SMP
7388  /*
7389   * This function computes an effective utilization for the given CPU, to be
7390   * used for frequency selection given the linear relation: f = u * f_max.
7391   *
7392   * The scheduler tracks the following metrics:
7393   *
7394   *   cpu_util_{cfs,rt,dl,irq}()
7395   *   cpu_bw_dl()
7396   *
7397   * Where the cfs,rt and dl util numbers are tracked with the same metric and
7398   * synchronized windows and are thus directly comparable.
7399   *
7400   * The cfs,rt,dl utilization are the running times measured with rq->clock_task
7401   * which excludes things like IRQ and steal-time. These latter are then accrued
7402   * in the irq utilization.
7403   *
7404   * The DL bandwidth number otoh is not a measured metric but a value computed
7405   * based on the task model parameters and gives the minimal utilization
7406   * required to meet deadlines.
7407   */
effective_cpu_util(int cpu,unsigned long util_cfs,enum cpu_util_type type,struct task_struct * p)7408  unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
7409  				 enum cpu_util_type type,
7410  				 struct task_struct *p)
7411  {
7412  	unsigned long dl_util, util, irq, max;
7413  	struct rq *rq = cpu_rq(cpu);
7414  
7415  	max = arch_scale_cpu_capacity(cpu);
7416  
7417  	if (!uclamp_is_used() &&
7418  	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
7419  		return max;
7420  	}
7421  
7422  	/*
7423  	 * Early check to see if IRQ/steal time saturates the CPU, can be
7424  	 * because of inaccuracies in how we track these -- see
7425  	 * update_irq_load_avg().
7426  	 */
7427  	irq = cpu_util_irq(rq);
7428  	if (unlikely(irq >= max))
7429  		return max;
7430  
7431  	/*
7432  	 * Because the time spend on RT/DL tasks is visible as 'lost' time to
7433  	 * CFS tasks and we use the same metric to track the effective
7434  	 * utilization (PELT windows are synchronized) we can directly add them
7435  	 * to obtain the CPU's actual utilization.
7436  	 *
7437  	 * CFS and RT utilization can be boosted or capped, depending on
7438  	 * utilization clamp constraints requested by currently RUNNABLE
7439  	 * tasks.
7440  	 * When there are no CFS RUNNABLE tasks, clamps are released and
7441  	 * frequency will be gracefully reduced with the utilization decay.
7442  	 */
7443  	util = util_cfs + cpu_util_rt(rq);
7444  	if (type == FREQUENCY_UTIL)
7445  		util = uclamp_rq_util_with(rq, util, p);
7446  
7447  	dl_util = cpu_util_dl(rq);
7448  
7449  	/*
7450  	 * For frequency selection we do not make cpu_util_dl() a permanent part
7451  	 * of this sum because we want to use cpu_bw_dl() later on, but we need
7452  	 * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
7453  	 * that we select f_max when there is no idle time.
7454  	 *
7455  	 * NOTE: numerical errors or stop class might cause us to not quite hit
7456  	 * saturation when we should -- something for later.
7457  	 */
7458  	if (util + dl_util >= max)
7459  		return max;
7460  
7461  	/*
7462  	 * OTOH, for energy computation we need the estimated running time, so
7463  	 * include util_dl and ignore dl_bw.
7464  	 */
7465  	if (type == ENERGY_UTIL)
7466  		util += dl_util;
7467  
7468  	/*
7469  	 * There is still idle time; further improve the number by using the
7470  	 * irq metric. Because IRQ/steal time is hidden from the task clock we
7471  	 * need to scale the task numbers:
7472  	 *
7473  	 *              max - irq
7474  	 *   U' = irq + --------- * U
7475  	 *                 max
7476  	 */
7477  	util = scale_irq_capacity(util, irq, max);
7478  	util += irq;
7479  
7480  	/*
7481  	 * Bandwidth required by DEADLINE must always be granted while, for
7482  	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
7483  	 * to gracefully reduce the frequency when no tasks show up for longer
7484  	 * periods of time.
7485  	 *
7486  	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
7487  	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
7488  	 * an interface. So, we only do the latter for now.
7489  	 */
7490  	if (type == FREQUENCY_UTIL)
7491  		util += cpu_bw_dl(rq);
7492  
7493  	return min(max, util);
7494  }
7495  
sched_cpu_util(int cpu)7496  unsigned long sched_cpu_util(int cpu)
7497  {
7498  	return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL);
7499  }
7500  #endif /* CONFIG_SMP */
7501  
7502  /**
7503   * find_process_by_pid - find a process with a matching PID value.
7504   * @pid: the pid in question.
7505   *
7506   * The task of @pid, if found. %NULL otherwise.
7507   */
find_process_by_pid(pid_t pid)7508  static struct task_struct *find_process_by_pid(pid_t pid)
7509  {
7510  	return pid ? find_task_by_vpid(pid) : current;
7511  }
7512  
7513  /*
7514   * sched_setparam() passes in -1 for its policy, to let the functions
7515   * it calls know not to change it.
7516   */
7517  #define SETPARAM_POLICY	-1
7518  
__setscheduler_params(struct task_struct * p,const struct sched_attr * attr)7519  static void __setscheduler_params(struct task_struct *p,
7520  		const struct sched_attr *attr)
7521  {
7522  	int policy = attr->sched_policy;
7523  
7524  	if (policy == SETPARAM_POLICY)
7525  		policy = p->policy;
7526  
7527  	p->policy = policy;
7528  
7529  	if (dl_policy(policy))
7530  		__setparam_dl(p, attr);
7531  	else if (fair_policy(policy))
7532  		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
7533  
7534  	/* rt-policy tasks do not have a timerslack */
7535  	if (task_is_realtime(p)) {
7536  		p->timer_slack_ns = 0;
7537  	} else if (p->timer_slack_ns == 0) {
7538  		/* when switching back to non-rt policy, restore timerslack */
7539  		p->timer_slack_ns = p->default_timer_slack_ns;
7540  	}
7541  
7542  	/*
7543  	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
7544  	 * !rt_policy. Always setting this ensures that things like
7545  	 * getparam()/getattr() don't report silly values for !rt tasks.
7546  	 */
7547  	p->rt_priority = attr->sched_priority;
7548  	p->normal_prio = normal_prio(p);
7549  	set_load_weight(p, true);
7550  }
7551  
7552  /*
7553   * Check the target process has a UID that matches the current process's:
7554   */
check_same_owner(struct task_struct * p)7555  static bool check_same_owner(struct task_struct *p)
7556  {
7557  	const struct cred *cred = current_cred(), *pcred;
7558  	bool match;
7559  
7560  	rcu_read_lock();
7561  	pcred = __task_cred(p);
7562  	match = (uid_eq(cred->euid, pcred->euid) ||
7563  		 uid_eq(cred->euid, pcred->uid));
7564  	rcu_read_unlock();
7565  	return match;
7566  }
7567  
7568  /*
7569   * Allow unprivileged RT tasks to decrease priority.
7570   * Only issue a capable test if needed and only once to avoid an audit
7571   * event on permitted non-privileged operations:
7572   */
user_check_sched_setscheduler(struct task_struct * p,const struct sched_attr * attr,int policy,int reset_on_fork)7573  static int user_check_sched_setscheduler(struct task_struct *p,
7574  					 const struct sched_attr *attr,
7575  					 int policy, int reset_on_fork)
7576  {
7577  	if (fair_policy(policy)) {
7578  		if (attr->sched_nice < task_nice(p) &&
7579  		    !is_nice_reduction(p, attr->sched_nice))
7580  			goto req_priv;
7581  	}
7582  
7583  	if (rt_policy(policy)) {
7584  		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
7585  
7586  		/* Can't set/change the rt policy: */
7587  		if (policy != p->policy && !rlim_rtprio)
7588  			goto req_priv;
7589  
7590  		/* Can't increase priority: */
7591  		if (attr->sched_priority > p->rt_priority &&
7592  		    attr->sched_priority > rlim_rtprio)
7593  			goto req_priv;
7594  	}
7595  
7596  	/*
7597  	 * Can't set/change SCHED_DEADLINE policy at all for now
7598  	 * (safest behavior); in the future we would like to allow
7599  	 * unprivileged DL tasks to increase their relative deadline
7600  	 * or reduce their runtime (both ways reducing utilization)
7601  	 */
7602  	if (dl_policy(policy))
7603  		goto req_priv;
7604  
7605  	/*
7606  	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
7607  	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
7608  	 */
7609  	if (task_has_idle_policy(p) && !idle_policy(policy)) {
7610  		if (!is_nice_reduction(p, task_nice(p)))
7611  			goto req_priv;
7612  	}
7613  
7614  	/* Can't change other user's priorities: */
7615  	if (!check_same_owner(p))
7616  		goto req_priv;
7617  
7618  	/* Normal users shall not reset the sched_reset_on_fork flag: */
7619  	if (p->sched_reset_on_fork && !reset_on_fork)
7620  		goto req_priv;
7621  
7622  	return 0;
7623  
7624  req_priv:
7625  	if (!capable(CAP_SYS_NICE))
7626  		return -EPERM;
7627  
7628  	return 0;
7629  }
7630  
__sched_setscheduler(struct task_struct * p,const struct sched_attr * attr,bool user,bool pi)7631  static int __sched_setscheduler(struct task_struct *p,
7632  				const struct sched_attr *attr,
7633  				bool user, bool pi)
7634  {
7635  	int oldpolicy = -1, policy = attr->sched_policy;
7636  	int retval, oldprio, newprio, queued, running;
7637  	const struct sched_class *prev_class;
7638  	struct balance_callback *head;
7639  	struct rq_flags rf;
7640  	int reset_on_fork;
7641  	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7642  	struct rq *rq;
7643  	bool cpuset_locked = false;
7644  
7645  	/* The pi code expects interrupts enabled */
7646  	BUG_ON(pi && in_interrupt());
7647  recheck:
7648  	/* Double check policy once rq lock held: */
7649  	if (policy < 0) {
7650  		reset_on_fork = p->sched_reset_on_fork;
7651  		policy = oldpolicy = p->policy;
7652  	} else {
7653  		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
7654  
7655  		if (!valid_policy(policy))
7656  			return -EINVAL;
7657  	}
7658  
7659  	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7660  		return -EINVAL;
7661  
7662  	/*
7663  	 * Valid priorities for SCHED_FIFO and SCHED_RR are
7664  	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
7665  	 * SCHED_BATCH and SCHED_IDLE is 0.
7666  	 */
7667  	if (attr->sched_priority > MAX_RT_PRIO-1)
7668  		return -EINVAL;
7669  	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
7670  	    (rt_policy(policy) != (attr->sched_priority != 0)))
7671  		return -EINVAL;
7672  
7673  	if (user) {
7674  		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
7675  		if (retval)
7676  			return retval;
7677  
7678  		if (attr->sched_flags & SCHED_FLAG_SUGOV)
7679  			return -EINVAL;
7680  
7681  		retval = security_task_setscheduler(p);
7682  		if (retval)
7683  			return retval;
7684  	}
7685  
7686  	/* Update task specific "requested" clamps */
7687  	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
7688  		retval = uclamp_validate(p, attr);
7689  		if (retval)
7690  			return retval;
7691  	}
7692  
7693  	/*
7694  	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
7695  	 * information.
7696  	 */
7697  	if (dl_policy(policy) || dl_policy(p->policy)) {
7698  		cpuset_locked = true;
7699  		cpuset_lock();
7700  	}
7701  
7702  	/*
7703  	 * Make sure no PI-waiters arrive (or leave) while we are
7704  	 * changing the priority of the task:
7705  	 *
7706  	 * To be able to change p->policy safely, the appropriate
7707  	 * runqueue lock must be held.
7708  	 */
7709  	rq = task_rq_lock(p, &rf);
7710  	update_rq_clock(rq);
7711  
7712  	/*
7713  	 * Changing the policy of the stop threads its a very bad idea:
7714  	 */
7715  	if (p == rq->stop) {
7716  		retval = -EINVAL;
7717  		goto unlock;
7718  	}
7719  
7720  	/*
7721  	 * If not changing anything there's no need to proceed further,
7722  	 * but store a possible modification of reset_on_fork.
7723  	 */
7724  	if (unlikely(policy == p->policy)) {
7725  		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
7726  			goto change;
7727  		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
7728  			goto change;
7729  		if (dl_policy(policy) && dl_param_changed(p, attr))
7730  			goto change;
7731  		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
7732  			goto change;
7733  
7734  		p->sched_reset_on_fork = reset_on_fork;
7735  		retval = 0;
7736  		goto unlock;
7737  	}
7738  change:
7739  
7740  	if (user) {
7741  #ifdef CONFIG_RT_GROUP_SCHED
7742  		/*
7743  		 * Do not allow realtime tasks into groups that have no runtime
7744  		 * assigned.
7745  		 */
7746  		if (rt_bandwidth_enabled() && rt_policy(policy) &&
7747  				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
7748  				!task_group_is_autogroup(task_group(p))) {
7749  			retval = -EPERM;
7750  			goto unlock;
7751  		}
7752  #endif
7753  #ifdef CONFIG_SMP
7754  		if (dl_bandwidth_enabled() && dl_policy(policy) &&
7755  				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
7756  			cpumask_t *span = rq->rd->span;
7757  
7758  			/*
7759  			 * Don't allow tasks with an affinity mask smaller than
7760  			 * the entire root_domain to become SCHED_DEADLINE. We
7761  			 * will also fail if there's no bandwidth available.
7762  			 */
7763  			if (!cpumask_subset(span, p->cpus_ptr) ||
7764  			    rq->rd->dl_bw.bw == 0) {
7765  				retval = -EPERM;
7766  				goto unlock;
7767  			}
7768  		}
7769  #endif
7770  	}
7771  
7772  	/* Re-check policy now with rq lock held: */
7773  	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
7774  		policy = oldpolicy = -1;
7775  		task_rq_unlock(rq, p, &rf);
7776  		if (cpuset_locked)
7777  			cpuset_unlock();
7778  		goto recheck;
7779  	}
7780  
7781  	/*
7782  	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
7783  	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
7784  	 * is available.
7785  	 */
7786  	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
7787  		retval = -EBUSY;
7788  		goto unlock;
7789  	}
7790  
7791  	p->sched_reset_on_fork = reset_on_fork;
7792  	oldprio = p->prio;
7793  
7794  	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
7795  	if (pi) {
7796  		/*
7797  		 * Take priority boosted tasks into account. If the new
7798  		 * effective priority is unchanged, we just store the new
7799  		 * normal parameters and do not touch the scheduler class and
7800  		 * the runqueue. This will be done when the task deboost
7801  		 * itself.
7802  		 */
7803  		newprio = rt_effective_prio(p, newprio);
7804  		if (newprio == oldprio)
7805  			queue_flags &= ~DEQUEUE_MOVE;
7806  	}
7807  
7808  	queued = task_on_rq_queued(p);
7809  	running = task_current(rq, p);
7810  	if (queued)
7811  		dequeue_task(rq, p, queue_flags);
7812  	if (running)
7813  		put_prev_task(rq, p);
7814  
7815  	prev_class = p->sched_class;
7816  
7817  	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
7818  		__setscheduler_params(p, attr);
7819  		__setscheduler_prio(p, newprio);
7820  	}
7821  	__setscheduler_uclamp(p, attr);
7822  
7823  	if (queued) {
7824  		/*
7825  		 * We enqueue to tail when the priority of a task is
7826  		 * increased (user space view).
7827  		 */
7828  		if (oldprio < p->prio)
7829  			queue_flags |= ENQUEUE_HEAD;
7830  
7831  		enqueue_task(rq, p, queue_flags);
7832  	}
7833  	if (running)
7834  		set_next_task(rq, p);
7835  
7836  	check_class_changed(rq, p, prev_class, oldprio);
7837  
7838  	/* Avoid rq from going away on us: */
7839  	preempt_disable();
7840  	head = splice_balance_callbacks(rq);
7841  	task_rq_unlock(rq, p, &rf);
7842  
7843  	if (pi) {
7844  		if (cpuset_locked)
7845  			cpuset_unlock();
7846  		rt_mutex_adjust_pi(p);
7847  	}
7848  
7849  	/* Run balance callbacks after we've adjusted the PI chain: */
7850  	balance_callbacks(rq, head);
7851  	preempt_enable();
7852  
7853  	return 0;
7854  
7855  unlock:
7856  	task_rq_unlock(rq, p, &rf);
7857  	if (cpuset_locked)
7858  		cpuset_unlock();
7859  	return retval;
7860  }
7861  
_sched_setscheduler(struct task_struct * p,int policy,const struct sched_param * param,bool check)7862  static int _sched_setscheduler(struct task_struct *p, int policy,
7863  			       const struct sched_param *param, bool check)
7864  {
7865  	struct sched_attr attr = {
7866  		.sched_policy   = policy,
7867  		.sched_priority = param->sched_priority,
7868  		.sched_nice	= PRIO_TO_NICE(p->static_prio),
7869  	};
7870  
7871  	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
7872  	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7873  		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
7874  		policy &= ~SCHED_RESET_ON_FORK;
7875  		attr.sched_policy = policy;
7876  	}
7877  
7878  	return __sched_setscheduler(p, &attr, check, true);
7879  }
7880  /**
7881   * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
7882   * @p: the task in question.
7883   * @policy: new policy.
7884   * @param: structure containing the new RT priority.
7885   *
7886   * Use sched_set_fifo(), read its comment.
7887   *
7888   * Return: 0 on success. An error code otherwise.
7889   *
7890   * NOTE that the task may be already dead.
7891   */
sched_setscheduler(struct task_struct * p,int policy,const struct sched_param * param)7892  int sched_setscheduler(struct task_struct *p, int policy,
7893  		       const struct sched_param *param)
7894  {
7895  	return _sched_setscheduler(p, policy, param, true);
7896  }
7897  
sched_setattr(struct task_struct * p,const struct sched_attr * attr)7898  int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
7899  {
7900  	return __sched_setscheduler(p, attr, true, true);
7901  }
7902  
sched_setattr_nocheck(struct task_struct * p,const struct sched_attr * attr)7903  int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
7904  {
7905  	return __sched_setscheduler(p, attr, false, true);
7906  }
7907  EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
7908  
7909  /**
7910   * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
7911   * @p: the task in question.
7912   * @policy: new policy.
7913   * @param: structure containing the new RT priority.
7914   *
7915   * Just like sched_setscheduler, only don't bother checking if the
7916   * current context has permission.  For example, this is needed in
7917   * stop_machine(): we create temporary high priority worker threads,
7918   * but our caller might not have that capability.
7919   *
7920   * Return: 0 on success. An error code otherwise.
7921   */
sched_setscheduler_nocheck(struct task_struct * p,int policy,const struct sched_param * param)7922  int sched_setscheduler_nocheck(struct task_struct *p, int policy,
7923  			       const struct sched_param *param)
7924  {
7925  	return _sched_setscheduler(p, policy, param, false);
7926  }
7927  
7928  /*
7929   * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
7930   * incapable of resource management, which is the one thing an OS really should
7931   * be doing.
7932   *
7933   * This is of course the reason it is limited to privileged users only.
7934   *
7935   * Worse still; it is fundamentally impossible to compose static priority
7936   * workloads. You cannot take two correctly working static prio workloads
7937   * and smash them together and still expect them to work.
7938   *
7939   * For this reason 'all' FIFO tasks the kernel creates are basically at:
7940   *
7941   *   MAX_RT_PRIO / 2
7942   *
7943   * The administrator _MUST_ configure the system, the kernel simply doesn't
7944   * know enough information to make a sensible choice.
7945   */
sched_set_fifo(struct task_struct * p)7946  void sched_set_fifo(struct task_struct *p)
7947  {
7948  	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
7949  	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7950  }
7951  EXPORT_SYMBOL_GPL(sched_set_fifo);
7952  
7953  /*
7954   * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
7955   */
sched_set_fifo_low(struct task_struct * p)7956  void sched_set_fifo_low(struct task_struct *p)
7957  {
7958  	struct sched_param sp = { .sched_priority = 1 };
7959  	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
7960  }
7961  EXPORT_SYMBOL_GPL(sched_set_fifo_low);
7962  
sched_set_normal(struct task_struct * p,int nice)7963  void sched_set_normal(struct task_struct *p, int nice)
7964  {
7965  	struct sched_attr attr = {
7966  		.sched_policy = SCHED_NORMAL,
7967  		.sched_nice = nice,
7968  	};
7969  	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
7970  }
7971  EXPORT_SYMBOL_GPL(sched_set_normal);
7972  
7973  static int
do_sched_setscheduler(pid_t pid,int policy,struct sched_param __user * param)7974  do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
7975  {
7976  	struct sched_param lparam;
7977  	struct task_struct *p;
7978  	int retval;
7979  
7980  	if (!param || pid < 0)
7981  		return -EINVAL;
7982  	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
7983  		return -EFAULT;
7984  
7985  	rcu_read_lock();
7986  	retval = -ESRCH;
7987  	p = find_process_by_pid(pid);
7988  	if (likely(p))
7989  		get_task_struct(p);
7990  	rcu_read_unlock();
7991  
7992  	if (likely(p)) {
7993  		retval = sched_setscheduler(p, policy, &lparam);
7994  		put_task_struct(p);
7995  	}
7996  
7997  	return retval;
7998  }
7999  
8000  /*
8001   * Mimics kernel/events/core.c perf_copy_attr().
8002   */
sched_copy_attr(struct sched_attr __user * uattr,struct sched_attr * attr)8003  static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
8004  {
8005  	u32 size;
8006  	int ret;
8007  
8008  	/* Zero the full structure, so that a short copy will be nice: */
8009  	memset(attr, 0, sizeof(*attr));
8010  
8011  	ret = get_user(size, &uattr->size);
8012  	if (ret)
8013  		return ret;
8014  
8015  	/* ABI compatibility quirk: */
8016  	if (!size)
8017  		size = SCHED_ATTR_SIZE_VER0;
8018  	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
8019  		goto err_size;
8020  
8021  	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
8022  	if (ret) {
8023  		if (ret == -E2BIG)
8024  			goto err_size;
8025  		return ret;
8026  	}
8027  
8028  	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
8029  	    size < SCHED_ATTR_SIZE_VER1)
8030  		return -EINVAL;
8031  
8032  	/*
8033  	 * XXX: Do we want to be lenient like existing syscalls; or do we want
8034  	 * to be strict and return an error on out-of-bounds values?
8035  	 */
8036  	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
8037  
8038  	return 0;
8039  
8040  err_size:
8041  	put_user(sizeof(*attr), &uattr->size);
8042  	return -E2BIG;
8043  }
8044  
get_params(struct task_struct * p,struct sched_attr * attr)8045  static void get_params(struct task_struct *p, struct sched_attr *attr)
8046  {
8047  	if (task_has_dl_policy(p))
8048  		__getparam_dl(p, attr);
8049  	else if (task_has_rt_policy(p))
8050  		attr->sched_priority = p->rt_priority;
8051  	else
8052  		attr->sched_nice = task_nice(p);
8053  }
8054  
8055  /**
8056   * sys_sched_setscheduler - set/change the scheduler policy and RT priority
8057   * @pid: the pid in question.
8058   * @policy: new policy.
8059   * @param: structure containing the new RT priority.
8060   *
8061   * Return: 0 on success. An error code otherwise.
8062   */
SYSCALL_DEFINE3(sched_setscheduler,pid_t,pid,int,policy,struct sched_param __user *,param)8063  SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
8064  {
8065  	if (policy < 0)
8066  		return -EINVAL;
8067  
8068  	return do_sched_setscheduler(pid, policy, param);
8069  }
8070  
8071  /**
8072   * sys_sched_setparam - set/change the RT priority of a thread
8073   * @pid: the pid in question.
8074   * @param: structure containing the new RT priority.
8075   *
8076   * Return: 0 on success. An error code otherwise.
8077   */
SYSCALL_DEFINE2(sched_setparam,pid_t,pid,struct sched_param __user *,param)8078  SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
8079  {
8080  	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
8081  }
8082  
8083  /**
8084   * sys_sched_setattr - same as above, but with extended sched_attr
8085   * @pid: the pid in question.
8086   * @uattr: structure containing the extended parameters.
8087   * @flags: for future extension.
8088   */
SYSCALL_DEFINE3(sched_setattr,pid_t,pid,struct sched_attr __user *,uattr,unsigned int,flags)8089  SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
8090  			       unsigned int, flags)
8091  {
8092  	struct sched_attr attr;
8093  	struct task_struct *p;
8094  	int retval;
8095  
8096  	if (!uattr || pid < 0 || flags)
8097  		return -EINVAL;
8098  
8099  	retval = sched_copy_attr(uattr, &attr);
8100  	if (retval)
8101  		return retval;
8102  
8103  	if ((int)attr.sched_policy < 0)
8104  		return -EINVAL;
8105  	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
8106  		attr.sched_policy = SETPARAM_POLICY;
8107  
8108  	rcu_read_lock();
8109  	retval = -ESRCH;
8110  	p = find_process_by_pid(pid);
8111  	if (likely(p))
8112  		get_task_struct(p);
8113  	rcu_read_unlock();
8114  
8115  	if (likely(p)) {
8116  		if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
8117  			get_params(p, &attr);
8118  		retval = sched_setattr(p, &attr);
8119  		put_task_struct(p);
8120  	}
8121  
8122  	return retval;
8123  }
8124  
8125  /**
8126   * sys_sched_getscheduler - get the policy (scheduling class) of a thread
8127   * @pid: the pid in question.
8128   *
8129   * Return: On success, the policy of the thread. Otherwise, a negative error
8130   * code.
8131   */
SYSCALL_DEFINE1(sched_getscheduler,pid_t,pid)8132  SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
8133  {
8134  	struct task_struct *p;
8135  	int retval;
8136  
8137  	if (pid < 0)
8138  		return -EINVAL;
8139  
8140  	retval = -ESRCH;
8141  	rcu_read_lock();
8142  	p = find_process_by_pid(pid);
8143  	if (p) {
8144  		retval = security_task_getscheduler(p);
8145  		if (!retval)
8146  			retval = p->policy
8147  				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
8148  	}
8149  	rcu_read_unlock();
8150  	return retval;
8151  }
8152  
8153  /**
8154   * sys_sched_getparam - get the RT priority of a thread
8155   * @pid: the pid in question.
8156   * @param: structure containing the RT priority.
8157   *
8158   * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
8159   * code.
8160   */
SYSCALL_DEFINE2(sched_getparam,pid_t,pid,struct sched_param __user *,param)8161  SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
8162  {
8163  	struct sched_param lp = { .sched_priority = 0 };
8164  	struct task_struct *p;
8165  	int retval;
8166  
8167  	if (!param || pid < 0)
8168  		return -EINVAL;
8169  
8170  	rcu_read_lock();
8171  	p = find_process_by_pid(pid);
8172  	retval = -ESRCH;
8173  	if (!p)
8174  		goto out_unlock;
8175  
8176  	retval = security_task_getscheduler(p);
8177  	if (retval)
8178  		goto out_unlock;
8179  
8180  	if (task_has_rt_policy(p))
8181  		lp.sched_priority = p->rt_priority;
8182  	rcu_read_unlock();
8183  
8184  	/*
8185  	 * This one might sleep, we cannot do it with a spinlock held ...
8186  	 */
8187  	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
8188  
8189  	return retval;
8190  
8191  out_unlock:
8192  	rcu_read_unlock();
8193  	return retval;
8194  }
8195  
8196  /*
8197   * Copy the kernel size attribute structure (which might be larger
8198   * than what user-space knows about) to user-space.
8199   *
8200   * Note that all cases are valid: user-space buffer can be larger or
8201   * smaller than the kernel-space buffer. The usual case is that both
8202   * have the same size.
8203   */
8204  static int
sched_attr_copy_to_user(struct sched_attr __user * uattr,struct sched_attr * kattr,unsigned int usize)8205  sched_attr_copy_to_user(struct sched_attr __user *uattr,
8206  			struct sched_attr *kattr,
8207  			unsigned int usize)
8208  {
8209  	unsigned int ksize = sizeof(*kattr);
8210  
8211  	if (!access_ok(uattr, usize))
8212  		return -EFAULT;
8213  
8214  	/*
8215  	 * sched_getattr() ABI forwards and backwards compatibility:
8216  	 *
8217  	 * If usize == ksize then we just copy everything to user-space and all is good.
8218  	 *
8219  	 * If usize < ksize then we only copy as much as user-space has space for,
8220  	 * this keeps ABI compatibility as well. We skip the rest.
8221  	 *
8222  	 * If usize > ksize then user-space is using a newer version of the ABI,
8223  	 * which part the kernel doesn't know about. Just ignore it - tooling can
8224  	 * detect the kernel's knowledge of attributes from the attr->size value
8225  	 * which is set to ksize in this case.
8226  	 */
8227  	kattr->size = min(usize, ksize);
8228  
8229  	if (copy_to_user(uattr, kattr, kattr->size))
8230  		return -EFAULT;
8231  
8232  	return 0;
8233  }
8234  
8235  /**
8236   * sys_sched_getattr - similar to sched_getparam, but with sched_attr
8237   * @pid: the pid in question.
8238   * @uattr: structure containing the extended parameters.
8239   * @usize: sizeof(attr) for fwd/bwd comp.
8240   * @flags: for future extension.
8241   */
SYSCALL_DEFINE4(sched_getattr,pid_t,pid,struct sched_attr __user *,uattr,unsigned int,usize,unsigned int,flags)8242  SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
8243  		unsigned int, usize, unsigned int, flags)
8244  {
8245  	struct sched_attr kattr = { };
8246  	struct task_struct *p;
8247  	int retval;
8248  
8249  	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
8250  	    usize < SCHED_ATTR_SIZE_VER0 || flags)
8251  		return -EINVAL;
8252  
8253  	rcu_read_lock();
8254  	p = find_process_by_pid(pid);
8255  	retval = -ESRCH;
8256  	if (!p)
8257  		goto out_unlock;
8258  
8259  	retval = security_task_getscheduler(p);
8260  	if (retval)
8261  		goto out_unlock;
8262  
8263  	kattr.sched_policy = p->policy;
8264  	if (p->sched_reset_on_fork)
8265  		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
8266  	get_params(p, &kattr);
8267  	kattr.sched_flags &= SCHED_FLAG_ALL;
8268  
8269  #ifdef CONFIG_UCLAMP_TASK
8270  	/*
8271  	 * This could race with another potential updater, but this is fine
8272  	 * because it'll correctly read the old or the new value. We don't need
8273  	 * to guarantee who wins the race as long as it doesn't return garbage.
8274  	 */
8275  	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
8276  	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
8277  #endif
8278  
8279  	rcu_read_unlock();
8280  
8281  	return sched_attr_copy_to_user(uattr, &kattr, usize);
8282  
8283  out_unlock:
8284  	rcu_read_unlock();
8285  	return retval;
8286  }
8287  
8288  #ifdef CONFIG_SMP
dl_task_check_affinity(struct task_struct * p,const struct cpumask * mask)8289  int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
8290  {
8291  	int ret = 0;
8292  
8293  	/*
8294  	 * If the task isn't a deadline task or admission control is
8295  	 * disabled then we don't care about affinity changes.
8296  	 */
8297  	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
8298  		return 0;
8299  
8300  	/*
8301  	 * Since bandwidth control happens on root_domain basis,
8302  	 * if admission test is enabled, we only admit -deadline
8303  	 * tasks allowed to run on all the CPUs in the task's
8304  	 * root_domain.
8305  	 */
8306  	rcu_read_lock();
8307  	if (!cpumask_subset(task_rq(p)->rd->span, mask))
8308  		ret = -EBUSY;
8309  	rcu_read_unlock();
8310  	return ret;
8311  }
8312  #endif
8313  
8314  static int
__sched_setaffinity(struct task_struct * p,struct affinity_context * ctx)8315  __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
8316  {
8317  	int retval;
8318  	cpumask_var_t cpus_allowed, new_mask;
8319  
8320  	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
8321  		return -ENOMEM;
8322  
8323  	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
8324  		retval = -ENOMEM;
8325  		goto out_free_cpus_allowed;
8326  	}
8327  
8328  	cpuset_cpus_allowed(p, cpus_allowed);
8329  	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
8330  
8331  	ctx->new_mask = new_mask;
8332  	ctx->flags |= SCA_CHECK;
8333  
8334  	retval = dl_task_check_affinity(p, new_mask);
8335  	if (retval)
8336  		goto out_free_new_mask;
8337  
8338  	retval = __set_cpus_allowed_ptr(p, ctx);
8339  	if (retval)
8340  		goto out_free_new_mask;
8341  
8342  	cpuset_cpus_allowed(p, cpus_allowed);
8343  	if (!cpumask_subset(new_mask, cpus_allowed)) {
8344  		/*
8345  		 * We must have raced with a concurrent cpuset update.
8346  		 * Just reset the cpumask to the cpuset's cpus_allowed.
8347  		 */
8348  		cpumask_copy(new_mask, cpus_allowed);
8349  
8350  		/*
8351  		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
8352  		 * will restore the previous user_cpus_ptr value.
8353  		 *
8354  		 * In the unlikely event a previous user_cpus_ptr exists,
8355  		 * we need to further restrict the mask to what is allowed
8356  		 * by that old user_cpus_ptr.
8357  		 */
8358  		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
8359  			bool empty = !cpumask_and(new_mask, new_mask,
8360  						  ctx->user_mask);
8361  
8362  			if (WARN_ON_ONCE(empty))
8363  				cpumask_copy(new_mask, cpus_allowed);
8364  		}
8365  		__set_cpus_allowed_ptr(p, ctx);
8366  		retval = -EINVAL;
8367  	}
8368  
8369  out_free_new_mask:
8370  	free_cpumask_var(new_mask);
8371  out_free_cpus_allowed:
8372  	free_cpumask_var(cpus_allowed);
8373  	return retval;
8374  }
8375  
sched_setaffinity(pid_t pid,const struct cpumask * in_mask)8376  long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
8377  {
8378  	struct affinity_context ac;
8379  	struct cpumask *user_mask;
8380  	struct task_struct *p;
8381  	int retval;
8382  
8383  	rcu_read_lock();
8384  
8385  	p = find_process_by_pid(pid);
8386  	if (!p) {
8387  		rcu_read_unlock();
8388  		return -ESRCH;
8389  	}
8390  
8391  	/* Prevent p going away */
8392  	get_task_struct(p);
8393  	rcu_read_unlock();
8394  
8395  	if (p->flags & PF_NO_SETAFFINITY) {
8396  		retval = -EINVAL;
8397  		goto out_put_task;
8398  	}
8399  
8400  	if (!check_same_owner(p)) {
8401  		rcu_read_lock();
8402  		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
8403  			rcu_read_unlock();
8404  			retval = -EPERM;
8405  			goto out_put_task;
8406  		}
8407  		rcu_read_unlock();
8408  	}
8409  
8410  	retval = security_task_setscheduler(p);
8411  	if (retval)
8412  		goto out_put_task;
8413  
8414  	/*
8415  	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
8416  	 * alloc_user_cpus_ptr() returns NULL.
8417  	 */
8418  	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
8419  	if (user_mask) {
8420  		cpumask_copy(user_mask, in_mask);
8421  	} else if (IS_ENABLED(CONFIG_SMP)) {
8422  		retval = -ENOMEM;
8423  		goto out_put_task;
8424  	}
8425  
8426  	ac = (struct affinity_context){
8427  		.new_mask  = in_mask,
8428  		.user_mask = user_mask,
8429  		.flags     = SCA_USER,
8430  	};
8431  
8432  	retval = __sched_setaffinity(p, &ac);
8433  	kfree(ac.user_mask);
8434  
8435  out_put_task:
8436  	put_task_struct(p);
8437  	return retval;
8438  }
8439  
get_user_cpu_mask(unsigned long __user * user_mask_ptr,unsigned len,struct cpumask * new_mask)8440  static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
8441  			     struct cpumask *new_mask)
8442  {
8443  	if (len < cpumask_size())
8444  		cpumask_clear(new_mask);
8445  	else if (len > cpumask_size())
8446  		len = cpumask_size();
8447  
8448  	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
8449  }
8450  
8451  /**
8452   * sys_sched_setaffinity - set the CPU affinity of a process
8453   * @pid: pid of the process
8454   * @len: length in bytes of the bitmask pointed to by user_mask_ptr
8455   * @user_mask_ptr: user-space pointer to the new CPU mask
8456   *
8457   * Return: 0 on success. An error code otherwise.
8458   */
SYSCALL_DEFINE3(sched_setaffinity,pid_t,pid,unsigned int,len,unsigned long __user *,user_mask_ptr)8459  SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
8460  		unsigned long __user *, user_mask_ptr)
8461  {
8462  	cpumask_var_t new_mask;
8463  	int retval;
8464  
8465  	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
8466  		return -ENOMEM;
8467  
8468  	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
8469  	if (retval == 0)
8470  		retval = sched_setaffinity(pid, new_mask);
8471  	free_cpumask_var(new_mask);
8472  	return retval;
8473  }
8474  
sched_getaffinity(pid_t pid,struct cpumask * mask)8475  long sched_getaffinity(pid_t pid, struct cpumask *mask)
8476  {
8477  	struct task_struct *p;
8478  	unsigned long flags;
8479  	int retval;
8480  
8481  	rcu_read_lock();
8482  
8483  	retval = -ESRCH;
8484  	p = find_process_by_pid(pid);
8485  	if (!p)
8486  		goto out_unlock;
8487  
8488  	retval = security_task_getscheduler(p);
8489  	if (retval)
8490  		goto out_unlock;
8491  
8492  	raw_spin_lock_irqsave(&p->pi_lock, flags);
8493  	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
8494  	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
8495  
8496  out_unlock:
8497  	rcu_read_unlock();
8498  
8499  	return retval;
8500  }
8501  
8502  /**
8503   * sys_sched_getaffinity - get the CPU affinity of a process
8504   * @pid: pid of the process
8505   * @len: length in bytes of the bitmask pointed to by user_mask_ptr
8506   * @user_mask_ptr: user-space pointer to hold the current CPU mask
8507   *
8508   * Return: size of CPU mask copied to user_mask_ptr on success. An
8509   * error code otherwise.
8510   */
SYSCALL_DEFINE3(sched_getaffinity,pid_t,pid,unsigned int,len,unsigned long __user *,user_mask_ptr)8511  SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
8512  		unsigned long __user *, user_mask_ptr)
8513  {
8514  	int ret;
8515  	cpumask_var_t mask;
8516  
8517  	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
8518  		return -EINVAL;
8519  	if (len & (sizeof(unsigned long)-1))
8520  		return -EINVAL;
8521  
8522  	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
8523  		return -ENOMEM;
8524  
8525  	ret = sched_getaffinity(pid, mask);
8526  	if (ret == 0) {
8527  		unsigned int retlen = min(len, cpumask_size());
8528  
8529  		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
8530  			ret = -EFAULT;
8531  		else
8532  			ret = retlen;
8533  	}
8534  	free_cpumask_var(mask);
8535  
8536  	return ret;
8537  }
8538  
do_sched_yield(void)8539  static void do_sched_yield(void)
8540  {
8541  	struct rq_flags rf;
8542  	struct rq *rq;
8543  
8544  	rq = this_rq_lock_irq(&rf);
8545  
8546  	schedstat_inc(rq->yld_count);
8547  	current->sched_class->yield_task(rq);
8548  
8549  	preempt_disable();
8550  	rq_unlock_irq(rq, &rf);
8551  	sched_preempt_enable_no_resched();
8552  
8553  	schedule();
8554  }
8555  
8556  /**
8557   * sys_sched_yield - yield the current processor to other threads.
8558   *
8559   * This function yields the current CPU to other tasks. If there are no
8560   * other threads running on this CPU then this function will return.
8561   *
8562   * Return: 0.
8563   */
SYSCALL_DEFINE0(sched_yield)8564  SYSCALL_DEFINE0(sched_yield)
8565  {
8566  	do_sched_yield();
8567  	return 0;
8568  }
8569  
8570  #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
__cond_resched(void)8571  int __sched __cond_resched(void)
8572  {
8573  	if (should_resched(0) && !irqs_disabled()) {
8574  		preempt_schedule_common();
8575  		return 1;
8576  	}
8577  	/*
8578  	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
8579  	 * whether the current CPU is in an RCU read-side critical section,
8580  	 * so the tick can report quiescent states even for CPUs looping
8581  	 * in kernel context.  In contrast, in non-preemptible kernels,
8582  	 * RCU readers leave no in-memory hints, which means that CPU-bound
8583  	 * processes executing in kernel context might never report an
8584  	 * RCU quiescent state.  Therefore, the following code causes
8585  	 * cond_resched() to report a quiescent state, but only when RCU
8586  	 * is in urgent need of one.
8587  	 */
8588  #ifndef CONFIG_PREEMPT_RCU
8589  	rcu_all_qs();
8590  #endif
8591  	return 0;
8592  }
8593  EXPORT_SYMBOL(__cond_resched);
8594  #endif
8595  
8596  #ifdef CONFIG_PREEMPT_DYNAMIC
8597  #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8598  #define cond_resched_dynamic_enabled	__cond_resched
8599  #define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
8600  DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
8601  EXPORT_STATIC_CALL_TRAMP(cond_resched);
8602  
8603  #define might_resched_dynamic_enabled	__cond_resched
8604  #define might_resched_dynamic_disabled	((void *)&__static_call_return0)
8605  DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
8606  EXPORT_STATIC_CALL_TRAMP(might_resched);
8607  #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8608  static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
dynamic_cond_resched(void)8609  int __sched dynamic_cond_resched(void)
8610  {
8611  	klp_sched_try_switch();
8612  	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
8613  		return 0;
8614  	return __cond_resched();
8615  }
8616  EXPORT_SYMBOL(dynamic_cond_resched);
8617  
8618  static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
dynamic_might_resched(void)8619  int __sched dynamic_might_resched(void)
8620  {
8621  	if (!static_branch_unlikely(&sk_dynamic_might_resched))
8622  		return 0;
8623  	return __cond_resched();
8624  }
8625  EXPORT_SYMBOL(dynamic_might_resched);
8626  #endif
8627  #endif
8628  
8629  /*
8630   * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
8631   * call schedule, and on return reacquire the lock.
8632   *
8633   * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
8634   * operations here to prevent schedule() from being called twice (once via
8635   * spin_unlock(), once by hand).
8636   */
__cond_resched_lock(spinlock_t * lock)8637  int __cond_resched_lock(spinlock_t *lock)
8638  {
8639  	int resched = should_resched(PREEMPT_LOCK_OFFSET);
8640  	int ret = 0;
8641  
8642  	lockdep_assert_held(lock);
8643  
8644  	if (spin_needbreak(lock) || resched) {
8645  		spin_unlock(lock);
8646  		if (!_cond_resched())
8647  			cpu_relax();
8648  		ret = 1;
8649  		spin_lock(lock);
8650  	}
8651  	return ret;
8652  }
8653  EXPORT_SYMBOL(__cond_resched_lock);
8654  
__cond_resched_rwlock_read(rwlock_t * lock)8655  int __cond_resched_rwlock_read(rwlock_t *lock)
8656  {
8657  	int resched = should_resched(PREEMPT_LOCK_OFFSET);
8658  	int ret = 0;
8659  
8660  	lockdep_assert_held_read(lock);
8661  
8662  	if (rwlock_needbreak(lock) || resched) {
8663  		read_unlock(lock);
8664  		if (!_cond_resched())
8665  			cpu_relax();
8666  		ret = 1;
8667  		read_lock(lock);
8668  	}
8669  	return ret;
8670  }
8671  EXPORT_SYMBOL(__cond_resched_rwlock_read);
8672  
__cond_resched_rwlock_write(rwlock_t * lock)8673  int __cond_resched_rwlock_write(rwlock_t *lock)
8674  {
8675  	int resched = should_resched(PREEMPT_LOCK_OFFSET);
8676  	int ret = 0;
8677  
8678  	lockdep_assert_held_write(lock);
8679  
8680  	if (rwlock_needbreak(lock) || resched) {
8681  		write_unlock(lock);
8682  		if (!_cond_resched())
8683  			cpu_relax();
8684  		ret = 1;
8685  		write_lock(lock);
8686  	}
8687  	return ret;
8688  }
8689  EXPORT_SYMBOL(__cond_resched_rwlock_write);
8690  
8691  #ifdef CONFIG_PREEMPT_DYNAMIC
8692  
8693  #ifdef CONFIG_GENERIC_ENTRY
8694  #include <linux/entry-common.h>
8695  #endif
8696  
8697  /*
8698   * SC:cond_resched
8699   * SC:might_resched
8700   * SC:preempt_schedule
8701   * SC:preempt_schedule_notrace
8702   * SC:irqentry_exit_cond_resched
8703   *
8704   *
8705   * NONE:
8706   *   cond_resched               <- __cond_resched
8707   *   might_resched              <- RET0
8708   *   preempt_schedule           <- NOP
8709   *   preempt_schedule_notrace   <- NOP
8710   *   irqentry_exit_cond_resched <- NOP
8711   *
8712   * VOLUNTARY:
8713   *   cond_resched               <- __cond_resched
8714   *   might_resched              <- __cond_resched
8715   *   preempt_schedule           <- NOP
8716   *   preempt_schedule_notrace   <- NOP
8717   *   irqentry_exit_cond_resched <- NOP
8718   *
8719   * FULL:
8720   *   cond_resched               <- RET0
8721   *   might_resched              <- RET0
8722   *   preempt_schedule           <- preempt_schedule
8723   *   preempt_schedule_notrace   <- preempt_schedule_notrace
8724   *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
8725   */
8726  
8727  enum {
8728  	preempt_dynamic_undefined = -1,
8729  	preempt_dynamic_none,
8730  	preempt_dynamic_voluntary,
8731  	preempt_dynamic_full,
8732  };
8733  
8734  int preempt_dynamic_mode = preempt_dynamic_undefined;
8735  
sched_dynamic_mode(const char * str)8736  int sched_dynamic_mode(const char *str)
8737  {
8738  	if (!strcmp(str, "none"))
8739  		return preempt_dynamic_none;
8740  
8741  	if (!strcmp(str, "voluntary"))
8742  		return preempt_dynamic_voluntary;
8743  
8744  	if (!strcmp(str, "full"))
8745  		return preempt_dynamic_full;
8746  
8747  	return -EINVAL;
8748  }
8749  
8750  #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8751  #define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
8752  #define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
8753  #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8754  #define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
8755  #define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
8756  #else
8757  #error "Unsupported PREEMPT_DYNAMIC mechanism"
8758  #endif
8759  
8760  static DEFINE_MUTEX(sched_dynamic_mutex);
8761  static bool klp_override;
8762  
__sched_dynamic_update(int mode)8763  static void __sched_dynamic_update(int mode)
8764  {
8765  	/*
8766  	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
8767  	 * the ZERO state, which is invalid.
8768  	 */
8769  	if (!klp_override)
8770  		preempt_dynamic_enable(cond_resched);
8771  	preempt_dynamic_enable(might_resched);
8772  	preempt_dynamic_enable(preempt_schedule);
8773  	preempt_dynamic_enable(preempt_schedule_notrace);
8774  	preempt_dynamic_enable(irqentry_exit_cond_resched);
8775  
8776  	switch (mode) {
8777  	case preempt_dynamic_none:
8778  		if (!klp_override)
8779  			preempt_dynamic_enable(cond_resched);
8780  		preempt_dynamic_disable(might_resched);
8781  		preempt_dynamic_disable(preempt_schedule);
8782  		preempt_dynamic_disable(preempt_schedule_notrace);
8783  		preempt_dynamic_disable(irqentry_exit_cond_resched);
8784  		if (mode != preempt_dynamic_mode)
8785  			pr_info("Dynamic Preempt: none\n");
8786  		break;
8787  
8788  	case preempt_dynamic_voluntary:
8789  		if (!klp_override)
8790  			preempt_dynamic_enable(cond_resched);
8791  		preempt_dynamic_enable(might_resched);
8792  		preempt_dynamic_disable(preempt_schedule);
8793  		preempt_dynamic_disable(preempt_schedule_notrace);
8794  		preempt_dynamic_disable(irqentry_exit_cond_resched);
8795  		if (mode != preempt_dynamic_mode)
8796  			pr_info("Dynamic Preempt: voluntary\n");
8797  		break;
8798  
8799  	case preempt_dynamic_full:
8800  		if (!klp_override)
8801  			preempt_dynamic_disable(cond_resched);
8802  		preempt_dynamic_disable(might_resched);
8803  		preempt_dynamic_enable(preempt_schedule);
8804  		preempt_dynamic_enable(preempt_schedule_notrace);
8805  		preempt_dynamic_enable(irqentry_exit_cond_resched);
8806  		if (mode != preempt_dynamic_mode)
8807  			pr_info("Dynamic Preempt: full\n");
8808  		break;
8809  	}
8810  
8811  	preempt_dynamic_mode = mode;
8812  }
8813  
sched_dynamic_update(int mode)8814  void sched_dynamic_update(int mode)
8815  {
8816  	mutex_lock(&sched_dynamic_mutex);
8817  	__sched_dynamic_update(mode);
8818  	mutex_unlock(&sched_dynamic_mutex);
8819  }
8820  
8821  #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
8822  
klp_cond_resched(void)8823  static int klp_cond_resched(void)
8824  {
8825  	__klp_sched_try_switch();
8826  	return __cond_resched();
8827  }
8828  
sched_dynamic_klp_enable(void)8829  void sched_dynamic_klp_enable(void)
8830  {
8831  	mutex_lock(&sched_dynamic_mutex);
8832  
8833  	klp_override = true;
8834  	static_call_update(cond_resched, klp_cond_resched);
8835  
8836  	mutex_unlock(&sched_dynamic_mutex);
8837  }
8838  
sched_dynamic_klp_disable(void)8839  void sched_dynamic_klp_disable(void)
8840  {
8841  	mutex_lock(&sched_dynamic_mutex);
8842  
8843  	klp_override = false;
8844  	__sched_dynamic_update(preempt_dynamic_mode);
8845  
8846  	mutex_unlock(&sched_dynamic_mutex);
8847  }
8848  
8849  #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
8850  
setup_preempt_mode(char * str)8851  static int __init setup_preempt_mode(char *str)
8852  {
8853  	int mode = sched_dynamic_mode(str);
8854  	if (mode < 0) {
8855  		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
8856  		return 0;
8857  	}
8858  
8859  	sched_dynamic_update(mode);
8860  	return 1;
8861  }
8862  __setup("preempt=", setup_preempt_mode);
8863  
preempt_dynamic_init(void)8864  static void __init preempt_dynamic_init(void)
8865  {
8866  	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
8867  		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
8868  			sched_dynamic_update(preempt_dynamic_none);
8869  		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
8870  			sched_dynamic_update(preempt_dynamic_voluntary);
8871  		} else {
8872  			/* Default static call setting, nothing to do */
8873  			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
8874  			preempt_dynamic_mode = preempt_dynamic_full;
8875  			pr_info("Dynamic Preempt: full\n");
8876  		}
8877  	}
8878  }
8879  
8880  #define PREEMPT_MODEL_ACCESSOR(mode) \
8881  	bool preempt_model_##mode(void)						 \
8882  	{									 \
8883  		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
8884  		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
8885  	}									 \
8886  	EXPORT_SYMBOL_GPL(preempt_model_##mode)
8887  
8888  PREEMPT_MODEL_ACCESSOR(none);
8889  PREEMPT_MODEL_ACCESSOR(voluntary);
8890  PREEMPT_MODEL_ACCESSOR(full);
8891  
8892  #else /* !CONFIG_PREEMPT_DYNAMIC */
8893  
preempt_dynamic_init(void)8894  static inline void preempt_dynamic_init(void) { }
8895  
8896  #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
8897  
8898  /**
8899   * yield - yield the current processor to other threads.
8900   *
8901   * Do not ever use this function, there's a 99% chance you're doing it wrong.
8902   *
8903   * The scheduler is at all times free to pick the calling task as the most
8904   * eligible task to run, if removing the yield() call from your code breaks
8905   * it, it's already broken.
8906   *
8907   * Typical broken usage is:
8908   *
8909   * while (!event)
8910   *	yield();
8911   *
8912   * where one assumes that yield() will let 'the other' process run that will
8913   * make event true. If the current task is a SCHED_FIFO task that will never
8914   * happen. Never use yield() as a progress guarantee!!
8915   *
8916   * If you want to use yield() to wait for something, use wait_event().
8917   * If you want to use yield() to be 'nice' for others, use cond_resched().
8918   * If you still want to use yield(), do not!
8919   */
yield(void)8920  void __sched yield(void)
8921  {
8922  	set_current_state(TASK_RUNNING);
8923  	do_sched_yield();
8924  }
8925  EXPORT_SYMBOL(yield);
8926  
8927  /**
8928   * yield_to - yield the current processor to another thread in
8929   * your thread group, or accelerate that thread toward the
8930   * processor it's on.
8931   * @p: target task
8932   * @preempt: whether task preemption is allowed or not
8933   *
8934   * It's the caller's job to ensure that the target task struct
8935   * can't go away on us before we can do any checks.
8936   *
8937   * Return:
8938   *	true (>0) if we indeed boosted the target task.
8939   *	false (0) if we failed to boost the target.
8940   *	-ESRCH if there's no task to yield to.
8941   */
yield_to(struct task_struct * p,bool preempt)8942  int __sched yield_to(struct task_struct *p, bool preempt)
8943  {
8944  	struct task_struct *curr = current;
8945  	struct rq *rq, *p_rq;
8946  	unsigned long flags;
8947  	int yielded = 0;
8948  
8949  	local_irq_save(flags);
8950  	rq = this_rq();
8951  
8952  again:
8953  	p_rq = task_rq(p);
8954  	/*
8955  	 * If we're the only runnable task on the rq and target rq also
8956  	 * has only one task, there's absolutely no point in yielding.
8957  	 */
8958  	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
8959  		yielded = -ESRCH;
8960  		goto out_irq;
8961  	}
8962  
8963  	double_rq_lock(rq, p_rq);
8964  	if (task_rq(p) != p_rq) {
8965  		double_rq_unlock(rq, p_rq);
8966  		goto again;
8967  	}
8968  
8969  	if (!curr->sched_class->yield_to_task)
8970  		goto out_unlock;
8971  
8972  	if (curr->sched_class != p->sched_class)
8973  		goto out_unlock;
8974  
8975  	if (task_on_cpu(p_rq, p) || !task_is_running(p))
8976  		goto out_unlock;
8977  
8978  	yielded = curr->sched_class->yield_to_task(rq, p);
8979  	if (yielded) {
8980  		schedstat_inc(rq->yld_count);
8981  		/*
8982  		 * Make p's CPU reschedule; pick_next_entity takes care of
8983  		 * fairness.
8984  		 */
8985  		if (preempt && rq != p_rq)
8986  			resched_curr(p_rq);
8987  	}
8988  
8989  out_unlock:
8990  	double_rq_unlock(rq, p_rq);
8991  out_irq:
8992  	local_irq_restore(flags);
8993  
8994  	if (yielded > 0)
8995  		schedule();
8996  
8997  	return yielded;
8998  }
8999  EXPORT_SYMBOL_GPL(yield_to);
9000  
io_schedule_prepare(void)9001  int io_schedule_prepare(void)
9002  {
9003  	int old_iowait = current->in_iowait;
9004  
9005  	current->in_iowait = 1;
9006  	blk_flush_plug(current->plug, true);
9007  	return old_iowait;
9008  }
9009  
io_schedule_finish(int token)9010  void io_schedule_finish(int token)
9011  {
9012  	current->in_iowait = token;
9013  }
9014  
9015  /*
9016   * This task is about to go to sleep on IO. Increment rq->nr_iowait so
9017   * that process accounting knows that this is a task in IO wait state.
9018   */
io_schedule_timeout(long timeout)9019  long __sched io_schedule_timeout(long timeout)
9020  {
9021  	int token;
9022  	long ret;
9023  
9024  	token = io_schedule_prepare();
9025  	ret = schedule_timeout(timeout);
9026  	io_schedule_finish(token);
9027  
9028  	return ret;
9029  }
9030  EXPORT_SYMBOL(io_schedule_timeout);
9031  
io_schedule(void)9032  void __sched io_schedule(void)
9033  {
9034  	int token;
9035  
9036  	token = io_schedule_prepare();
9037  	schedule();
9038  	io_schedule_finish(token);
9039  }
9040  EXPORT_SYMBOL(io_schedule);
9041  
9042  /**
9043   * sys_sched_get_priority_max - return maximum RT priority.
9044   * @policy: scheduling class.
9045   *
9046   * Return: On success, this syscall returns the maximum
9047   * rt_priority that can be used by a given scheduling class.
9048   * On failure, a negative error code is returned.
9049   */
SYSCALL_DEFINE1(sched_get_priority_max,int,policy)9050  SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
9051  {
9052  	int ret = -EINVAL;
9053  
9054  	switch (policy) {
9055  	case SCHED_FIFO:
9056  	case SCHED_RR:
9057  		ret = MAX_RT_PRIO-1;
9058  		break;
9059  	case SCHED_DEADLINE:
9060  	case SCHED_NORMAL:
9061  	case SCHED_BATCH:
9062  	case SCHED_IDLE:
9063  		ret = 0;
9064  		break;
9065  	}
9066  	return ret;
9067  }
9068  
9069  /**
9070   * sys_sched_get_priority_min - return minimum RT priority.
9071   * @policy: scheduling class.
9072   *
9073   * Return: On success, this syscall returns the minimum
9074   * rt_priority that can be used by a given scheduling class.
9075   * On failure, a negative error code is returned.
9076   */
SYSCALL_DEFINE1(sched_get_priority_min,int,policy)9077  SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
9078  {
9079  	int ret = -EINVAL;
9080  
9081  	switch (policy) {
9082  	case SCHED_FIFO:
9083  	case SCHED_RR:
9084  		ret = 1;
9085  		break;
9086  	case SCHED_DEADLINE:
9087  	case SCHED_NORMAL:
9088  	case SCHED_BATCH:
9089  	case SCHED_IDLE:
9090  		ret = 0;
9091  	}
9092  	return ret;
9093  }
9094  
sched_rr_get_interval(pid_t pid,struct timespec64 * t)9095  static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
9096  {
9097  	struct task_struct *p;
9098  	unsigned int time_slice;
9099  	struct rq_flags rf;
9100  	struct rq *rq;
9101  	int retval;
9102  
9103  	if (pid < 0)
9104  		return -EINVAL;
9105  
9106  	retval = -ESRCH;
9107  	rcu_read_lock();
9108  	p = find_process_by_pid(pid);
9109  	if (!p)
9110  		goto out_unlock;
9111  
9112  	retval = security_task_getscheduler(p);
9113  	if (retval)
9114  		goto out_unlock;
9115  
9116  	rq = task_rq_lock(p, &rf);
9117  	time_slice = 0;
9118  	if (p->sched_class->get_rr_interval)
9119  		time_slice = p->sched_class->get_rr_interval(rq, p);
9120  	task_rq_unlock(rq, p, &rf);
9121  
9122  	rcu_read_unlock();
9123  	jiffies_to_timespec64(time_slice, t);
9124  	return 0;
9125  
9126  out_unlock:
9127  	rcu_read_unlock();
9128  	return retval;
9129  }
9130  
9131  /**
9132   * sys_sched_rr_get_interval - return the default timeslice of a process.
9133   * @pid: pid of the process.
9134   * @interval: userspace pointer to the timeslice value.
9135   *
9136   * this syscall writes the default timeslice value of a given process
9137   * into the user-space timespec buffer. A value of '0' means infinity.
9138   *
9139   * Return: On success, 0 and the timeslice is in @interval. Otherwise,
9140   * an error code.
9141   */
SYSCALL_DEFINE2(sched_rr_get_interval,pid_t,pid,struct __kernel_timespec __user *,interval)9142  SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
9143  		struct __kernel_timespec __user *, interval)
9144  {
9145  	struct timespec64 t;
9146  	int retval = sched_rr_get_interval(pid, &t);
9147  
9148  	if (retval == 0)
9149  		retval = put_timespec64(&t, interval);
9150  
9151  	return retval;
9152  }
9153  
9154  #ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32,pid_t,pid,struct old_timespec32 __user *,interval)9155  SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
9156  		struct old_timespec32 __user *, interval)
9157  {
9158  	struct timespec64 t;
9159  	int retval = sched_rr_get_interval(pid, &t);
9160  
9161  	if (retval == 0)
9162  		retval = put_old_timespec32(&t, interval);
9163  	return retval;
9164  }
9165  #endif
9166  
sched_show_task(struct task_struct * p)9167  void sched_show_task(struct task_struct *p)
9168  {
9169  	unsigned long free = 0;
9170  	int ppid;
9171  
9172  	if (!try_get_task_stack(p))
9173  		return;
9174  
9175  	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
9176  
9177  	if (task_is_running(p))
9178  		pr_cont("  running task    ");
9179  #ifdef CONFIG_DEBUG_STACK_USAGE
9180  	free = stack_not_used(p);
9181  #endif
9182  	ppid = 0;
9183  	rcu_read_lock();
9184  	if (pid_alive(p))
9185  		ppid = task_pid_nr(rcu_dereference(p->real_parent));
9186  	rcu_read_unlock();
9187  	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
9188  		free, task_pid_nr(p), ppid,
9189  		read_task_thread_flags(p));
9190  
9191  	print_worker_info(KERN_INFO, p);
9192  	print_stop_info(KERN_INFO, p);
9193  	show_stack(p, NULL, KERN_INFO);
9194  	put_task_stack(p);
9195  }
9196  EXPORT_SYMBOL_GPL(sched_show_task);
9197  
9198  static inline bool
state_filter_match(unsigned long state_filter,struct task_struct * p)9199  state_filter_match(unsigned long state_filter, struct task_struct *p)
9200  {
9201  	unsigned int state = READ_ONCE(p->__state);
9202  
9203  	/* no filter, everything matches */
9204  	if (!state_filter)
9205  		return true;
9206  
9207  	/* filter, but doesn't match */
9208  	if (!(state & state_filter))
9209  		return false;
9210  
9211  	/*
9212  	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
9213  	 * TASK_KILLABLE).
9214  	 */
9215  	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
9216  		return false;
9217  
9218  	return true;
9219  }
9220  
9221  
show_state_filter(unsigned int state_filter)9222  void show_state_filter(unsigned int state_filter)
9223  {
9224  	struct task_struct *g, *p;
9225  
9226  	rcu_read_lock();
9227  	for_each_process_thread(g, p) {
9228  		/*
9229  		 * reset the NMI-timeout, listing all files on a slow
9230  		 * console might take a lot of time:
9231  		 * Also, reset softlockup watchdogs on all CPUs, because
9232  		 * another CPU might be blocked waiting for us to process
9233  		 * an IPI.
9234  		 */
9235  		touch_nmi_watchdog();
9236  		touch_all_softlockup_watchdogs();
9237  		if (state_filter_match(state_filter, p))
9238  			sched_show_task(p);
9239  	}
9240  
9241  #ifdef CONFIG_SCHED_DEBUG
9242  	if (!state_filter)
9243  		sysrq_sched_debug_show();
9244  #endif
9245  	rcu_read_unlock();
9246  	/*
9247  	 * Only show locks if all tasks are dumped:
9248  	 */
9249  	if (!state_filter)
9250  		debug_show_all_locks();
9251  }
9252  
9253  /**
9254   * init_idle - set up an idle thread for a given CPU
9255   * @idle: task in question
9256   * @cpu: CPU the idle task belongs to
9257   *
9258   * NOTE: this function does not set the idle thread's NEED_RESCHED
9259   * flag, to make booting more robust.
9260   */
init_idle(struct task_struct * idle,int cpu)9261  void __init init_idle(struct task_struct *idle, int cpu)
9262  {
9263  #ifdef CONFIG_SMP
9264  	struct affinity_context ac = (struct affinity_context) {
9265  		.new_mask  = cpumask_of(cpu),
9266  		.flags     = 0,
9267  	};
9268  #endif
9269  	struct rq *rq = cpu_rq(cpu);
9270  	unsigned long flags;
9271  
9272  	raw_spin_lock_irqsave(&idle->pi_lock, flags);
9273  	raw_spin_rq_lock(rq);
9274  
9275  	idle->__state = TASK_RUNNING;
9276  	idle->se.exec_start = sched_clock();
9277  	/*
9278  	 * PF_KTHREAD should already be set at this point; regardless, make it
9279  	 * look like a proper per-CPU kthread.
9280  	 */
9281  	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
9282  	kthread_set_per_cpu(idle, cpu);
9283  
9284  #ifdef CONFIG_SMP
9285  	/*
9286  	 * No validation and serialization required at boot time and for
9287  	 * setting up the idle tasks of not yet online CPUs.
9288  	 */
9289  	set_cpus_allowed_common(idle, &ac);
9290  #endif
9291  	/*
9292  	 * We're having a chicken and egg problem, even though we are
9293  	 * holding rq->lock, the CPU isn't yet set to this CPU so the
9294  	 * lockdep check in task_group() will fail.
9295  	 *
9296  	 * Similar case to sched_fork(). / Alternatively we could
9297  	 * use task_rq_lock() here and obtain the other rq->lock.
9298  	 *
9299  	 * Silence PROVE_RCU
9300  	 */
9301  	rcu_read_lock();
9302  	__set_task_cpu(idle, cpu);
9303  	rcu_read_unlock();
9304  
9305  	rq->idle = idle;
9306  	rcu_assign_pointer(rq->curr, idle);
9307  	idle->on_rq = TASK_ON_RQ_QUEUED;
9308  #ifdef CONFIG_SMP
9309  	idle->on_cpu = 1;
9310  #endif
9311  	raw_spin_rq_unlock(rq);
9312  	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
9313  
9314  	/* Set the preempt count _outside_ the spinlocks! */
9315  	init_idle_preempt_count(idle, cpu);
9316  
9317  	/*
9318  	 * The idle tasks have their own, simple scheduling class:
9319  	 */
9320  	idle->sched_class = &idle_sched_class;
9321  	ftrace_graph_init_idle_task(idle, cpu);
9322  	vtime_init_idle(idle, cpu);
9323  #ifdef CONFIG_SMP
9324  	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
9325  #endif
9326  }
9327  
9328  #ifdef CONFIG_SMP
9329  
cpuset_cpumask_can_shrink(const struct cpumask * cur,const struct cpumask * trial)9330  int cpuset_cpumask_can_shrink(const struct cpumask *cur,
9331  			      const struct cpumask *trial)
9332  {
9333  	int ret = 1;
9334  
9335  	if (cpumask_empty(cur))
9336  		return ret;
9337  
9338  	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
9339  
9340  	return ret;
9341  }
9342  
task_can_attach(struct task_struct * p)9343  int task_can_attach(struct task_struct *p)
9344  {
9345  	int ret = 0;
9346  
9347  	/*
9348  	 * Kthreads which disallow setaffinity shouldn't be moved
9349  	 * to a new cpuset; we don't want to change their CPU
9350  	 * affinity and isolating such threads by their set of
9351  	 * allowed nodes is unnecessary.  Thus, cpusets are not
9352  	 * applicable for such threads.  This prevents checking for
9353  	 * success of set_cpus_allowed_ptr() on all attached tasks
9354  	 * before cpus_mask may be changed.
9355  	 */
9356  	if (p->flags & PF_NO_SETAFFINITY)
9357  		ret = -EINVAL;
9358  
9359  	return ret;
9360  }
9361  
9362  bool sched_smp_initialized __read_mostly;
9363  
9364  #ifdef CONFIG_NUMA_BALANCING
9365  /* Migrate current task p to target_cpu */
migrate_task_to(struct task_struct * p,int target_cpu)9366  int migrate_task_to(struct task_struct *p, int target_cpu)
9367  {
9368  	struct migration_arg arg = { p, target_cpu };
9369  	int curr_cpu = task_cpu(p);
9370  
9371  	if (curr_cpu == target_cpu)
9372  		return 0;
9373  
9374  	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
9375  		return -EINVAL;
9376  
9377  	/* TODO: This is not properly updating schedstats */
9378  
9379  	trace_sched_move_numa(p, curr_cpu, target_cpu);
9380  	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
9381  }
9382  
9383  /*
9384   * Requeue a task on a given node and accurately track the number of NUMA
9385   * tasks on the runqueues
9386   */
sched_setnuma(struct task_struct * p,int nid)9387  void sched_setnuma(struct task_struct *p, int nid)
9388  {
9389  	bool queued, running;
9390  	struct rq_flags rf;
9391  	struct rq *rq;
9392  
9393  	rq = task_rq_lock(p, &rf);
9394  	queued = task_on_rq_queued(p);
9395  	running = task_current(rq, p);
9396  
9397  	if (queued)
9398  		dequeue_task(rq, p, DEQUEUE_SAVE);
9399  	if (running)
9400  		put_prev_task(rq, p);
9401  
9402  	p->numa_preferred_nid = nid;
9403  
9404  	if (queued)
9405  		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
9406  	if (running)
9407  		set_next_task(rq, p);
9408  	task_rq_unlock(rq, p, &rf);
9409  }
9410  #endif /* CONFIG_NUMA_BALANCING */
9411  
9412  #ifdef CONFIG_HOTPLUG_CPU
9413  /*
9414   * Ensure that the idle task is using init_mm right before its CPU goes
9415   * offline.
9416   */
idle_task_exit(void)9417  void idle_task_exit(void)
9418  {
9419  	struct mm_struct *mm = current->active_mm;
9420  
9421  	BUG_ON(cpu_online(smp_processor_id()));
9422  	BUG_ON(current != this_rq()->idle);
9423  
9424  	if (mm != &init_mm) {
9425  		switch_mm(mm, &init_mm, current);
9426  		finish_arch_post_lock_switch();
9427  	}
9428  
9429  	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
9430  }
9431  
__balance_push_cpu_stop(void * arg)9432  static int __balance_push_cpu_stop(void *arg)
9433  {
9434  	struct task_struct *p = arg;
9435  	struct rq *rq = this_rq();
9436  	struct rq_flags rf;
9437  	int cpu;
9438  
9439  	raw_spin_lock_irq(&p->pi_lock);
9440  	rq_lock(rq, &rf);
9441  
9442  	update_rq_clock(rq);
9443  
9444  	if (task_rq(p) == rq && task_on_rq_queued(p)) {
9445  		cpu = select_fallback_rq(rq->cpu, p);
9446  		rq = __migrate_task(rq, &rf, p, cpu);
9447  	}
9448  
9449  	rq_unlock(rq, &rf);
9450  	raw_spin_unlock_irq(&p->pi_lock);
9451  
9452  	put_task_struct(p);
9453  
9454  	return 0;
9455  }
9456  
9457  static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
9458  
9459  /*
9460   * Ensure we only run per-cpu kthreads once the CPU goes !active.
9461   *
9462   * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
9463   * effective when the hotplug motion is down.
9464   */
balance_push(struct rq * rq)9465  static void balance_push(struct rq *rq)
9466  {
9467  	struct task_struct *push_task = rq->curr;
9468  
9469  	lockdep_assert_rq_held(rq);
9470  
9471  	/*
9472  	 * Ensure the thing is persistent until balance_push_set(.on = false);
9473  	 */
9474  	rq->balance_callback = &balance_push_callback;
9475  
9476  	/*
9477  	 * Only active while going offline and when invoked on the outgoing
9478  	 * CPU.
9479  	 */
9480  	if (!cpu_dying(rq->cpu) || rq != this_rq())
9481  		return;
9482  
9483  	/*
9484  	 * Both the cpu-hotplug and stop task are in this case and are
9485  	 * required to complete the hotplug process.
9486  	 */
9487  	if (kthread_is_per_cpu(push_task) ||
9488  	    is_migration_disabled(push_task)) {
9489  
9490  		/*
9491  		 * If this is the idle task on the outgoing CPU try to wake
9492  		 * up the hotplug control thread which might wait for the
9493  		 * last task to vanish. The rcuwait_active() check is
9494  		 * accurate here because the waiter is pinned on this CPU
9495  		 * and can't obviously be running in parallel.
9496  		 *
9497  		 * On RT kernels this also has to check whether there are
9498  		 * pinned and scheduled out tasks on the runqueue. They
9499  		 * need to leave the migrate disabled section first.
9500  		 */
9501  		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
9502  		    rcuwait_active(&rq->hotplug_wait)) {
9503  			raw_spin_rq_unlock(rq);
9504  			rcuwait_wake_up(&rq->hotplug_wait);
9505  			raw_spin_rq_lock(rq);
9506  		}
9507  		return;
9508  	}
9509  
9510  	get_task_struct(push_task);
9511  	/*
9512  	 * Temporarily drop rq->lock such that we can wake-up the stop task.
9513  	 * Both preemption and IRQs are still disabled.
9514  	 */
9515  	preempt_disable();
9516  	raw_spin_rq_unlock(rq);
9517  	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
9518  			    this_cpu_ptr(&push_work));
9519  	preempt_enable();
9520  	/*
9521  	 * At this point need_resched() is true and we'll take the loop in
9522  	 * schedule(). The next pick is obviously going to be the stop task
9523  	 * which kthread_is_per_cpu() and will push this task away.
9524  	 */
9525  	raw_spin_rq_lock(rq);
9526  }
9527  
balance_push_set(int cpu,bool on)9528  static void balance_push_set(int cpu, bool on)
9529  {
9530  	struct rq *rq = cpu_rq(cpu);
9531  	struct rq_flags rf;
9532  
9533  	rq_lock_irqsave(rq, &rf);
9534  	if (on) {
9535  		WARN_ON_ONCE(rq->balance_callback);
9536  		rq->balance_callback = &balance_push_callback;
9537  	} else if (rq->balance_callback == &balance_push_callback) {
9538  		rq->balance_callback = NULL;
9539  	}
9540  	rq_unlock_irqrestore(rq, &rf);
9541  }
9542  
9543  /*
9544   * Invoked from a CPUs hotplug control thread after the CPU has been marked
9545   * inactive. All tasks which are not per CPU kernel threads are either
9546   * pushed off this CPU now via balance_push() or placed on a different CPU
9547   * during wakeup. Wait until the CPU is quiescent.
9548   */
balance_hotplug_wait(void)9549  static void balance_hotplug_wait(void)
9550  {
9551  	struct rq *rq = this_rq();
9552  
9553  	rcuwait_wait_event(&rq->hotplug_wait,
9554  			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
9555  			   TASK_UNINTERRUPTIBLE);
9556  }
9557  
9558  #else
9559  
balance_push(struct rq * rq)9560  static inline void balance_push(struct rq *rq)
9561  {
9562  }
9563  
balance_push_set(int cpu,bool on)9564  static inline void balance_push_set(int cpu, bool on)
9565  {
9566  }
9567  
balance_hotplug_wait(void)9568  static inline void balance_hotplug_wait(void)
9569  {
9570  }
9571  
9572  #endif /* CONFIG_HOTPLUG_CPU */
9573  
set_rq_online(struct rq * rq)9574  void set_rq_online(struct rq *rq)
9575  {
9576  	if (!rq->online) {
9577  		const struct sched_class *class;
9578  
9579  		cpumask_set_cpu(rq->cpu, rq->rd->online);
9580  		rq->online = 1;
9581  
9582  		for_each_class(class) {
9583  			if (class->rq_online)
9584  				class->rq_online(rq);
9585  		}
9586  	}
9587  }
9588  
set_rq_offline(struct rq * rq)9589  void set_rq_offline(struct rq *rq)
9590  {
9591  	if (rq->online) {
9592  		const struct sched_class *class;
9593  
9594  		update_rq_clock(rq);
9595  		for_each_class(class) {
9596  			if (class->rq_offline)
9597  				class->rq_offline(rq);
9598  		}
9599  
9600  		cpumask_clear_cpu(rq->cpu, rq->rd->online);
9601  		rq->online = 0;
9602  	}
9603  }
9604  
sched_set_rq_online(struct rq * rq,int cpu)9605  static inline void sched_set_rq_online(struct rq *rq, int cpu)
9606  {
9607  	struct rq_flags rf;
9608  
9609  	rq_lock_irqsave(rq, &rf);
9610  	if (rq->rd) {
9611  		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9612  		set_rq_online(rq);
9613  	}
9614  	rq_unlock_irqrestore(rq, &rf);
9615  }
9616  
sched_set_rq_offline(struct rq * rq,int cpu)9617  static inline void sched_set_rq_offline(struct rq *rq, int cpu)
9618  {
9619  	struct rq_flags rf;
9620  
9621  	rq_lock_irqsave(rq, &rf);
9622  	if (rq->rd) {
9623  		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9624  		set_rq_offline(rq);
9625  	}
9626  	rq_unlock_irqrestore(rq, &rf);
9627  }
9628  
9629  /*
9630   * used to mark begin/end of suspend/resume:
9631   */
9632  static int num_cpus_frozen;
9633  
9634  /*
9635   * Update cpusets according to cpu_active mask.  If cpusets are
9636   * disabled, cpuset_update_active_cpus() becomes a simple wrapper
9637   * around partition_sched_domains().
9638   *
9639   * If we come here as part of a suspend/resume, don't touch cpusets because we
9640   * want to restore it back to its original state upon resume anyway.
9641   */
cpuset_cpu_active(void)9642  static void cpuset_cpu_active(void)
9643  {
9644  	if (cpuhp_tasks_frozen) {
9645  		/*
9646  		 * num_cpus_frozen tracks how many CPUs are involved in suspend
9647  		 * resume sequence. As long as this is not the last online
9648  		 * operation in the resume sequence, just build a single sched
9649  		 * domain, ignoring cpusets.
9650  		 */
9651  		partition_sched_domains(1, NULL, NULL);
9652  		if (--num_cpus_frozen)
9653  			return;
9654  		/*
9655  		 * This is the last CPU online operation. So fall through and
9656  		 * restore the original sched domains by considering the
9657  		 * cpuset configurations.
9658  		 */
9659  		cpuset_force_rebuild();
9660  	}
9661  	cpuset_update_active_cpus();
9662  }
9663  
cpuset_cpu_inactive(unsigned int cpu)9664  static int cpuset_cpu_inactive(unsigned int cpu)
9665  {
9666  	if (!cpuhp_tasks_frozen) {
9667  		int ret = dl_bw_check_overflow(cpu);
9668  
9669  		if (ret)
9670  			return ret;
9671  		cpuset_update_active_cpus();
9672  	} else {
9673  		num_cpus_frozen++;
9674  		partition_sched_domains(1, NULL, NULL);
9675  	}
9676  	return 0;
9677  }
9678  
sched_smt_present_inc(int cpu)9679  static inline void sched_smt_present_inc(int cpu)
9680  {
9681  #ifdef CONFIG_SCHED_SMT
9682  	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9683  		static_branch_inc_cpuslocked(&sched_smt_present);
9684  #endif
9685  }
9686  
sched_smt_present_dec(int cpu)9687  static inline void sched_smt_present_dec(int cpu)
9688  {
9689  #ifdef CONFIG_SCHED_SMT
9690  	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9691  		static_branch_dec_cpuslocked(&sched_smt_present);
9692  #endif
9693  }
9694  
sched_cpu_activate(unsigned int cpu)9695  int sched_cpu_activate(unsigned int cpu)
9696  {
9697  	struct rq *rq = cpu_rq(cpu);
9698  
9699  	/*
9700  	 * Clear the balance_push callback and prepare to schedule
9701  	 * regular tasks.
9702  	 */
9703  	balance_push_set(cpu, false);
9704  
9705  	/*
9706  	 * When going up, increment the number of cores with SMT present.
9707  	 */
9708  	sched_smt_present_inc(cpu);
9709  	set_cpu_active(cpu, true);
9710  
9711  	if (sched_smp_initialized) {
9712  		sched_update_numa(cpu, true);
9713  		sched_domains_numa_masks_set(cpu);
9714  		cpuset_cpu_active();
9715  	}
9716  
9717  	/*
9718  	 * Put the rq online, if not already. This happens:
9719  	 *
9720  	 * 1) In the early boot process, because we build the real domains
9721  	 *    after all CPUs have been brought up.
9722  	 *
9723  	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
9724  	 *    domains.
9725  	 */
9726  	sched_set_rq_online(rq, cpu);
9727  
9728  	return 0;
9729  }
9730  
sched_cpu_deactivate(unsigned int cpu)9731  int sched_cpu_deactivate(unsigned int cpu)
9732  {
9733  	struct rq *rq = cpu_rq(cpu);
9734  	int ret;
9735  
9736  	/*
9737  	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
9738  	 * load balancing when not active
9739  	 */
9740  	nohz_balance_exit_idle(rq);
9741  
9742  	set_cpu_active(cpu, false);
9743  
9744  	/*
9745  	 * From this point forward, this CPU will refuse to run any task that
9746  	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
9747  	 * push those tasks away until this gets cleared, see
9748  	 * sched_cpu_dying().
9749  	 */
9750  	balance_push_set(cpu, true);
9751  
9752  	/*
9753  	 * We've cleared cpu_active_mask / set balance_push, wait for all
9754  	 * preempt-disabled and RCU users of this state to go away such that
9755  	 * all new such users will observe it.
9756  	 *
9757  	 * Specifically, we rely on ttwu to no longer target this CPU, see
9758  	 * ttwu_queue_cond() and is_cpu_allowed().
9759  	 *
9760  	 * Do sync before park smpboot threads to take care the rcu boost case.
9761  	 */
9762  	synchronize_rcu();
9763  
9764  	sched_set_rq_offline(rq, cpu);
9765  
9766  	/*
9767  	 * When going down, decrement the number of cores with SMT present.
9768  	 */
9769  	sched_smt_present_dec(cpu);
9770  
9771  #ifdef CONFIG_SCHED_SMT
9772  	sched_core_cpu_deactivate(cpu);
9773  #endif
9774  
9775  	if (!sched_smp_initialized)
9776  		return 0;
9777  
9778  	sched_update_numa(cpu, false);
9779  	ret = cpuset_cpu_inactive(cpu);
9780  	if (ret) {
9781  		sched_smt_present_inc(cpu);
9782  		sched_set_rq_online(rq, cpu);
9783  		balance_push_set(cpu, false);
9784  		set_cpu_active(cpu, true);
9785  		sched_update_numa(cpu, true);
9786  		return ret;
9787  	}
9788  	sched_domains_numa_masks_clear(cpu);
9789  	return 0;
9790  }
9791  
sched_rq_cpu_starting(unsigned int cpu)9792  static void sched_rq_cpu_starting(unsigned int cpu)
9793  {
9794  	struct rq *rq = cpu_rq(cpu);
9795  
9796  	rq->calc_load_update = calc_load_update;
9797  	update_max_interval();
9798  }
9799  
sched_cpu_starting(unsigned int cpu)9800  int sched_cpu_starting(unsigned int cpu)
9801  {
9802  	sched_core_cpu_starting(cpu);
9803  	sched_rq_cpu_starting(cpu);
9804  	sched_tick_start(cpu);
9805  	return 0;
9806  }
9807  
9808  #ifdef CONFIG_HOTPLUG_CPU
9809  
9810  /*
9811   * Invoked immediately before the stopper thread is invoked to bring the
9812   * CPU down completely. At this point all per CPU kthreads except the
9813   * hotplug thread (current) and the stopper thread (inactive) have been
9814   * either parked or have been unbound from the outgoing CPU. Ensure that
9815   * any of those which might be on the way out are gone.
9816   *
9817   * If after this point a bound task is being woken on this CPU then the
9818   * responsible hotplug callback has failed to do it's job.
9819   * sched_cpu_dying() will catch it with the appropriate fireworks.
9820   */
sched_cpu_wait_empty(unsigned int cpu)9821  int sched_cpu_wait_empty(unsigned int cpu)
9822  {
9823  	balance_hotplug_wait();
9824  	return 0;
9825  }
9826  
9827  /*
9828   * Since this CPU is going 'away' for a while, fold any nr_active delta we
9829   * might have. Called from the CPU stopper task after ensuring that the
9830   * stopper is the last running task on the CPU, so nr_active count is
9831   * stable. We need to take the teardown thread which is calling this into
9832   * account, so we hand in adjust = 1 to the load calculation.
9833   *
9834   * Also see the comment "Global load-average calculations".
9835   */
calc_load_migrate(struct rq * rq)9836  static void calc_load_migrate(struct rq *rq)
9837  {
9838  	long delta = calc_load_fold_active(rq, 1);
9839  
9840  	if (delta)
9841  		atomic_long_add(delta, &calc_load_tasks);
9842  }
9843  
dump_rq_tasks(struct rq * rq,const char * loglvl)9844  static void dump_rq_tasks(struct rq *rq, const char *loglvl)
9845  {
9846  	struct task_struct *g, *p;
9847  	int cpu = cpu_of(rq);
9848  
9849  	lockdep_assert_rq_held(rq);
9850  
9851  	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
9852  	for_each_process_thread(g, p) {
9853  		if (task_cpu(p) != cpu)
9854  			continue;
9855  
9856  		if (!task_on_rq_queued(p))
9857  			continue;
9858  
9859  		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
9860  	}
9861  }
9862  
sched_cpu_dying(unsigned int cpu)9863  int sched_cpu_dying(unsigned int cpu)
9864  {
9865  	struct rq *rq = cpu_rq(cpu);
9866  	struct rq_flags rf;
9867  
9868  	/* Handle pending wakeups and then migrate everything off */
9869  	sched_tick_stop(cpu);
9870  
9871  	rq_lock_irqsave(rq, &rf);
9872  	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
9873  		WARN(true, "Dying CPU not properly vacated!");
9874  		dump_rq_tasks(rq, KERN_WARNING);
9875  	}
9876  	rq_unlock_irqrestore(rq, &rf);
9877  
9878  	calc_load_migrate(rq);
9879  	update_max_interval();
9880  	hrtick_clear(rq);
9881  	sched_core_cpu_dying(cpu);
9882  	return 0;
9883  }
9884  #endif
9885  
sched_init_smp(void)9886  void __init sched_init_smp(void)
9887  {
9888  	sched_init_numa(NUMA_NO_NODE);
9889  
9890  	/*
9891  	 * There's no userspace yet to cause hotplug operations; hence all the
9892  	 * CPU masks are stable and all blatant races in the below code cannot
9893  	 * happen.
9894  	 */
9895  	mutex_lock(&sched_domains_mutex);
9896  	sched_init_domains(cpu_active_mask);
9897  	mutex_unlock(&sched_domains_mutex);
9898  
9899  	/* Move init over to a non-isolated CPU */
9900  	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
9901  		BUG();
9902  	current->flags &= ~PF_NO_SETAFFINITY;
9903  	sched_init_granularity();
9904  
9905  	init_sched_rt_class();
9906  	init_sched_dl_class();
9907  
9908  	sched_smp_initialized = true;
9909  }
9910  
migration_init(void)9911  static int __init migration_init(void)
9912  {
9913  	sched_cpu_starting(smp_processor_id());
9914  	return 0;
9915  }
9916  early_initcall(migration_init);
9917  
9918  #else
sched_init_smp(void)9919  void __init sched_init_smp(void)
9920  {
9921  	sched_init_granularity();
9922  }
9923  #endif /* CONFIG_SMP */
9924  
in_sched_functions(unsigned long addr)9925  int in_sched_functions(unsigned long addr)
9926  {
9927  	return in_lock_functions(addr) ||
9928  		(addr >= (unsigned long)__sched_text_start
9929  		&& addr < (unsigned long)__sched_text_end);
9930  }
9931  
9932  #ifdef CONFIG_CGROUP_SCHED
9933  /*
9934   * Default task group.
9935   * Every task in system belongs to this group at bootup.
9936   */
9937  struct task_group root_task_group;
9938  LIST_HEAD(task_groups);
9939  
9940  /* Cacheline aligned slab cache for task_group */
9941  static struct kmem_cache *task_group_cache __read_mostly;
9942  #endif
9943  
sched_init(void)9944  void __init sched_init(void)
9945  {
9946  	unsigned long ptr = 0;
9947  	int i;
9948  
9949  	/* Make sure the linker didn't screw up */
9950  	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
9951  	       &fair_sched_class != &rt_sched_class + 1 ||
9952  	       &rt_sched_class   != &dl_sched_class + 1);
9953  #ifdef CONFIG_SMP
9954  	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
9955  #endif
9956  
9957  	wait_bit_init();
9958  
9959  #ifdef CONFIG_FAIR_GROUP_SCHED
9960  	ptr += 2 * nr_cpu_ids * sizeof(void **);
9961  #endif
9962  #ifdef CONFIG_RT_GROUP_SCHED
9963  	ptr += 2 * nr_cpu_ids * sizeof(void **);
9964  #endif
9965  	if (ptr) {
9966  		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
9967  
9968  #ifdef CONFIG_FAIR_GROUP_SCHED
9969  		root_task_group.se = (struct sched_entity **)ptr;
9970  		ptr += nr_cpu_ids * sizeof(void **);
9971  
9972  		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
9973  		ptr += nr_cpu_ids * sizeof(void **);
9974  
9975  		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
9976  		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
9977  #endif /* CONFIG_FAIR_GROUP_SCHED */
9978  #ifdef CONFIG_RT_GROUP_SCHED
9979  		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
9980  		ptr += nr_cpu_ids * sizeof(void **);
9981  
9982  		root_task_group.rt_rq = (struct rt_rq **)ptr;
9983  		ptr += nr_cpu_ids * sizeof(void **);
9984  
9985  #endif /* CONFIG_RT_GROUP_SCHED */
9986  	}
9987  
9988  	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
9989  
9990  #ifdef CONFIG_SMP
9991  	init_defrootdomain();
9992  #endif
9993  
9994  #ifdef CONFIG_RT_GROUP_SCHED
9995  	init_rt_bandwidth(&root_task_group.rt_bandwidth,
9996  			global_rt_period(), global_rt_runtime());
9997  #endif /* CONFIG_RT_GROUP_SCHED */
9998  
9999  #ifdef CONFIG_CGROUP_SCHED
10000  	task_group_cache = KMEM_CACHE(task_group, 0);
10001  
10002  	list_add(&root_task_group.list, &task_groups);
10003  	INIT_LIST_HEAD(&root_task_group.children);
10004  	INIT_LIST_HEAD(&root_task_group.siblings);
10005  	autogroup_init(&init_task);
10006  #endif /* CONFIG_CGROUP_SCHED */
10007  
10008  	for_each_possible_cpu(i) {
10009  		struct rq *rq;
10010  
10011  		rq = cpu_rq(i);
10012  		raw_spin_lock_init(&rq->__lock);
10013  		rq->nr_running = 0;
10014  		rq->calc_load_active = 0;
10015  		rq->calc_load_update = jiffies + LOAD_FREQ;
10016  		init_cfs_rq(&rq->cfs);
10017  		init_rt_rq(&rq->rt);
10018  		init_dl_rq(&rq->dl);
10019  #ifdef CONFIG_FAIR_GROUP_SCHED
10020  		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
10021  		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
10022  		/*
10023  		 * How much CPU bandwidth does root_task_group get?
10024  		 *
10025  		 * In case of task-groups formed thr' the cgroup filesystem, it
10026  		 * gets 100% of the CPU resources in the system. This overall
10027  		 * system CPU resource is divided among the tasks of
10028  		 * root_task_group and its child task-groups in a fair manner,
10029  		 * based on each entity's (task or task-group's) weight
10030  		 * (se->load.weight).
10031  		 *
10032  		 * In other words, if root_task_group has 10 tasks of weight
10033  		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
10034  		 * then A0's share of the CPU resource is:
10035  		 *
10036  		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
10037  		 *
10038  		 * We achieve this by letting root_task_group's tasks sit
10039  		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
10040  		 */
10041  		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
10042  #endif /* CONFIG_FAIR_GROUP_SCHED */
10043  
10044  		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
10045  #ifdef CONFIG_RT_GROUP_SCHED
10046  		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
10047  #endif
10048  #ifdef CONFIG_SMP
10049  		rq->sd = NULL;
10050  		rq->rd = NULL;
10051  		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
10052  		rq->balance_callback = &balance_push_callback;
10053  		rq->active_balance = 0;
10054  		rq->next_balance = jiffies;
10055  		rq->push_cpu = 0;
10056  		rq->cpu = i;
10057  		rq->online = 0;
10058  		rq->idle_stamp = 0;
10059  		rq->avg_idle = 2*sysctl_sched_migration_cost;
10060  		rq->wake_stamp = jiffies;
10061  		rq->wake_avg_idle = rq->avg_idle;
10062  		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
10063  
10064  		INIT_LIST_HEAD(&rq->cfs_tasks);
10065  
10066  		rq_attach_root(rq, &def_root_domain);
10067  #ifdef CONFIG_NO_HZ_COMMON
10068  		rq->last_blocked_load_update_tick = jiffies;
10069  		atomic_set(&rq->nohz_flags, 0);
10070  
10071  		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
10072  #endif
10073  #ifdef CONFIG_HOTPLUG_CPU
10074  		rcuwait_init(&rq->hotplug_wait);
10075  #endif
10076  #endif /* CONFIG_SMP */
10077  		hrtick_rq_init(rq);
10078  		atomic_set(&rq->nr_iowait, 0);
10079  
10080  #ifdef CONFIG_SCHED_CORE
10081  		rq->core = rq;
10082  		rq->core_pick = NULL;
10083  		rq->core_enabled = 0;
10084  		rq->core_tree = RB_ROOT;
10085  		rq->core_forceidle_count = 0;
10086  		rq->core_forceidle_occupation = 0;
10087  		rq->core_forceidle_start = 0;
10088  
10089  		rq->core_cookie = 0UL;
10090  #endif
10091  		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
10092  	}
10093  
10094  	set_load_weight(&init_task, false);
10095  
10096  	/*
10097  	 * The boot idle thread does lazy MMU switching as well:
10098  	 */
10099  	mmgrab_lazy_tlb(&init_mm);
10100  	enter_lazy_tlb(&init_mm, current);
10101  
10102  	/*
10103  	 * The idle task doesn't need the kthread struct to function, but it
10104  	 * is dressed up as a per-CPU kthread and thus needs to play the part
10105  	 * if we want to avoid special-casing it in code that deals with per-CPU
10106  	 * kthreads.
10107  	 */
10108  	WARN_ON(!set_kthread_struct(current));
10109  
10110  	/*
10111  	 * Make us the idle thread. Technically, schedule() should not be
10112  	 * called from this thread, however somewhere below it might be,
10113  	 * but because we are the idle thread, we just pick up running again
10114  	 * when this runqueue becomes "idle".
10115  	 */
10116  	__sched_fork(0, current);
10117  	init_idle(current, smp_processor_id());
10118  
10119  	calc_load_update = jiffies + LOAD_FREQ;
10120  
10121  #ifdef CONFIG_SMP
10122  	idle_thread_set_boot_cpu();
10123  	balance_push_set(smp_processor_id(), false);
10124  #endif
10125  	init_sched_fair_class();
10126  
10127  	psi_init();
10128  
10129  	init_uclamp();
10130  
10131  	preempt_dynamic_init();
10132  
10133  	scheduler_running = 1;
10134  }
10135  
10136  #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
10137  
__might_sleep(const char * file,int line)10138  void __might_sleep(const char *file, int line)
10139  {
10140  	unsigned int state = get_current_state();
10141  	/*
10142  	 * Blocking primitives will set (and therefore destroy) current->state,
10143  	 * since we will exit with TASK_RUNNING make sure we enter with it,
10144  	 * otherwise we will destroy state.
10145  	 */
10146  	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
10147  			"do not call blocking ops when !TASK_RUNNING; "
10148  			"state=%x set at [<%p>] %pS\n", state,
10149  			(void *)current->task_state_change,
10150  			(void *)current->task_state_change);
10151  
10152  	__might_resched(file, line, 0);
10153  }
10154  EXPORT_SYMBOL(__might_sleep);
10155  
print_preempt_disable_ip(int preempt_offset,unsigned long ip)10156  static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
10157  {
10158  	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
10159  		return;
10160  
10161  	if (preempt_count() == preempt_offset)
10162  		return;
10163  
10164  	pr_err("Preemption disabled at:");
10165  	print_ip_sym(KERN_ERR, ip);
10166  }
10167  
resched_offsets_ok(unsigned int offsets)10168  static inline bool resched_offsets_ok(unsigned int offsets)
10169  {
10170  	unsigned int nested = preempt_count();
10171  
10172  	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
10173  
10174  	return nested == offsets;
10175  }
10176  
__might_resched(const char * file,int line,unsigned int offsets)10177  void __might_resched(const char *file, int line, unsigned int offsets)
10178  {
10179  	/* Ratelimiting timestamp: */
10180  	static unsigned long prev_jiffy;
10181  
10182  	unsigned long preempt_disable_ip;
10183  
10184  	/* WARN_ON_ONCE() by default, no rate limit required: */
10185  	rcu_sleep_check();
10186  
10187  	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
10188  	     !is_idle_task(current) && !current->non_block_count) ||
10189  	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
10190  	    oops_in_progress)
10191  		return;
10192  
10193  	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10194  		return;
10195  	prev_jiffy = jiffies;
10196  
10197  	/* Save this before calling printk(), since that will clobber it: */
10198  	preempt_disable_ip = get_preempt_disable_ip(current);
10199  
10200  	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
10201  	       file, line);
10202  	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
10203  	       in_atomic(), irqs_disabled(), current->non_block_count,
10204  	       current->pid, current->comm);
10205  	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
10206  	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
10207  
10208  	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
10209  		pr_err("RCU nest depth: %d, expected: %u\n",
10210  		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
10211  	}
10212  
10213  	if (task_stack_end_corrupted(current))
10214  		pr_emerg("Thread overran stack, or stack corrupted\n");
10215  
10216  	debug_show_held_locks(current);
10217  	if (irqs_disabled())
10218  		print_irqtrace_events(current);
10219  
10220  	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
10221  				 preempt_disable_ip);
10222  
10223  	dump_stack();
10224  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10225  }
10226  EXPORT_SYMBOL(__might_resched);
10227  
__cant_sleep(const char * file,int line,int preempt_offset)10228  void __cant_sleep(const char *file, int line, int preempt_offset)
10229  {
10230  	static unsigned long prev_jiffy;
10231  
10232  	if (irqs_disabled())
10233  		return;
10234  
10235  	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
10236  		return;
10237  
10238  	if (preempt_count() > preempt_offset)
10239  		return;
10240  
10241  	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10242  		return;
10243  	prev_jiffy = jiffies;
10244  
10245  	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
10246  	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
10247  			in_atomic(), irqs_disabled(),
10248  			current->pid, current->comm);
10249  
10250  	debug_show_held_locks(current);
10251  	dump_stack();
10252  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10253  }
10254  EXPORT_SYMBOL_GPL(__cant_sleep);
10255  
10256  #ifdef CONFIG_SMP
__cant_migrate(const char * file,int line)10257  void __cant_migrate(const char *file, int line)
10258  {
10259  	static unsigned long prev_jiffy;
10260  
10261  	if (irqs_disabled())
10262  		return;
10263  
10264  	if (is_migration_disabled(current))
10265  		return;
10266  
10267  	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
10268  		return;
10269  
10270  	if (preempt_count() > 0)
10271  		return;
10272  
10273  	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10274  		return;
10275  	prev_jiffy = jiffies;
10276  
10277  	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
10278  	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
10279  	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
10280  	       current->pid, current->comm);
10281  
10282  	debug_show_held_locks(current);
10283  	dump_stack();
10284  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10285  }
10286  EXPORT_SYMBOL_GPL(__cant_migrate);
10287  #endif
10288  #endif
10289  
10290  #ifdef CONFIG_MAGIC_SYSRQ
normalize_rt_tasks(void)10291  void normalize_rt_tasks(void)
10292  {
10293  	struct task_struct *g, *p;
10294  	struct sched_attr attr = {
10295  		.sched_policy = SCHED_NORMAL,
10296  	};
10297  
10298  	read_lock(&tasklist_lock);
10299  	for_each_process_thread(g, p) {
10300  		/*
10301  		 * Only normalize user tasks:
10302  		 */
10303  		if (p->flags & PF_KTHREAD)
10304  			continue;
10305  
10306  		p->se.exec_start = 0;
10307  		schedstat_set(p->stats.wait_start,  0);
10308  		schedstat_set(p->stats.sleep_start, 0);
10309  		schedstat_set(p->stats.block_start, 0);
10310  
10311  		if (!dl_task(p) && !rt_task(p)) {
10312  			/*
10313  			 * Renice negative nice level userspace
10314  			 * tasks back to 0:
10315  			 */
10316  			if (task_nice(p) < 0)
10317  				set_user_nice(p, 0);
10318  			continue;
10319  		}
10320  
10321  		__sched_setscheduler(p, &attr, false, false);
10322  	}
10323  	read_unlock(&tasklist_lock);
10324  }
10325  
10326  #endif /* CONFIG_MAGIC_SYSRQ */
10327  
10328  #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
10329  /*
10330   * These functions are only useful for the IA64 MCA handling, or kdb.
10331   *
10332   * They can only be called when the whole system has been
10333   * stopped - every CPU needs to be quiescent, and no scheduling
10334   * activity can take place. Using them for anything else would
10335   * be a serious bug, and as a result, they aren't even visible
10336   * under any other configuration.
10337   */
10338  
10339  /**
10340   * curr_task - return the current task for a given CPU.
10341   * @cpu: the processor in question.
10342   *
10343   * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
10344   *
10345   * Return: The current task for @cpu.
10346   */
curr_task(int cpu)10347  struct task_struct *curr_task(int cpu)
10348  {
10349  	return cpu_curr(cpu);
10350  }
10351  
10352  #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
10353  
10354  #ifdef CONFIG_IA64
10355  /**
10356   * ia64_set_curr_task - set the current task for a given CPU.
10357   * @cpu: the processor in question.
10358   * @p: the task pointer to set.
10359   *
10360   * Description: This function must only be used when non-maskable interrupts
10361   * are serviced on a separate stack. It allows the architecture to switch the
10362   * notion of the current task on a CPU in a non-blocking manner. This function
10363   * must be called with all CPU's synchronized, and interrupts disabled, the
10364   * and caller must save the original value of the current task (see
10365   * curr_task() above) and restore that value before reenabling interrupts and
10366   * re-starting the system.
10367   *
10368   * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
10369   */
ia64_set_curr_task(int cpu,struct task_struct * p)10370  void ia64_set_curr_task(int cpu, struct task_struct *p)
10371  {
10372  	cpu_curr(cpu) = p;
10373  }
10374  
10375  #endif
10376  
10377  #ifdef CONFIG_CGROUP_SCHED
10378  /* task_group_lock serializes the addition/removal of task groups */
10379  static DEFINE_SPINLOCK(task_group_lock);
10380  
alloc_uclamp_sched_group(struct task_group * tg,struct task_group * parent)10381  static inline void alloc_uclamp_sched_group(struct task_group *tg,
10382  					    struct task_group *parent)
10383  {
10384  #ifdef CONFIG_UCLAMP_TASK_GROUP
10385  	enum uclamp_id clamp_id;
10386  
10387  	for_each_clamp_id(clamp_id) {
10388  		uclamp_se_set(&tg->uclamp_req[clamp_id],
10389  			      uclamp_none(clamp_id), false);
10390  		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
10391  	}
10392  #endif
10393  }
10394  
sched_free_group(struct task_group * tg)10395  static void sched_free_group(struct task_group *tg)
10396  {
10397  	free_fair_sched_group(tg);
10398  	free_rt_sched_group(tg);
10399  	autogroup_free(tg);
10400  	kmem_cache_free(task_group_cache, tg);
10401  }
10402  
sched_free_group_rcu(struct rcu_head * rcu)10403  static void sched_free_group_rcu(struct rcu_head *rcu)
10404  {
10405  	sched_free_group(container_of(rcu, struct task_group, rcu));
10406  }
10407  
sched_unregister_group(struct task_group * tg)10408  static void sched_unregister_group(struct task_group *tg)
10409  {
10410  	unregister_fair_sched_group(tg);
10411  	unregister_rt_sched_group(tg);
10412  	/*
10413  	 * We have to wait for yet another RCU grace period to expire, as
10414  	 * print_cfs_stats() might run concurrently.
10415  	 */
10416  	call_rcu(&tg->rcu, sched_free_group_rcu);
10417  }
10418  
10419  /* allocate runqueue etc for a new task group */
sched_create_group(struct task_group * parent)10420  struct task_group *sched_create_group(struct task_group *parent)
10421  {
10422  	struct task_group *tg;
10423  
10424  	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
10425  	if (!tg)
10426  		return ERR_PTR(-ENOMEM);
10427  
10428  	if (!alloc_fair_sched_group(tg, parent))
10429  		goto err;
10430  
10431  	if (!alloc_rt_sched_group(tg, parent))
10432  		goto err;
10433  
10434  	alloc_uclamp_sched_group(tg, parent);
10435  
10436  	return tg;
10437  
10438  err:
10439  	sched_free_group(tg);
10440  	return ERR_PTR(-ENOMEM);
10441  }
10442  
sched_online_group(struct task_group * tg,struct task_group * parent)10443  void sched_online_group(struct task_group *tg, struct task_group *parent)
10444  {
10445  	unsigned long flags;
10446  
10447  	spin_lock_irqsave(&task_group_lock, flags);
10448  	list_add_rcu(&tg->list, &task_groups);
10449  
10450  	/* Root should already exist: */
10451  	WARN_ON(!parent);
10452  
10453  	tg->parent = parent;
10454  	INIT_LIST_HEAD(&tg->children);
10455  	list_add_rcu(&tg->siblings, &parent->children);
10456  	spin_unlock_irqrestore(&task_group_lock, flags);
10457  
10458  	online_fair_sched_group(tg);
10459  }
10460  
10461  /* rcu callback to free various structures associated with a task group */
sched_unregister_group_rcu(struct rcu_head * rhp)10462  static void sched_unregister_group_rcu(struct rcu_head *rhp)
10463  {
10464  	/* Now it should be safe to free those cfs_rqs: */
10465  	sched_unregister_group(container_of(rhp, struct task_group, rcu));
10466  }
10467  
sched_destroy_group(struct task_group * tg)10468  void sched_destroy_group(struct task_group *tg)
10469  {
10470  	/* Wait for possible concurrent references to cfs_rqs complete: */
10471  	call_rcu(&tg->rcu, sched_unregister_group_rcu);
10472  }
10473  
sched_release_group(struct task_group * tg)10474  void sched_release_group(struct task_group *tg)
10475  {
10476  	unsigned long flags;
10477  
10478  	/*
10479  	 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
10480  	 * sched_cfs_period_timer()).
10481  	 *
10482  	 * For this to be effective, we have to wait for all pending users of
10483  	 * this task group to leave their RCU critical section to ensure no new
10484  	 * user will see our dying task group any more. Specifically ensure
10485  	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
10486  	 *
10487  	 * We therefore defer calling unregister_fair_sched_group() to
10488  	 * sched_unregister_group() which is guarantied to get called only after the
10489  	 * current RCU grace period has expired.
10490  	 */
10491  	spin_lock_irqsave(&task_group_lock, flags);
10492  	list_del_rcu(&tg->list);
10493  	list_del_rcu(&tg->siblings);
10494  	spin_unlock_irqrestore(&task_group_lock, flags);
10495  }
10496  
sched_change_group(struct task_struct * tsk)10497  static void sched_change_group(struct task_struct *tsk)
10498  {
10499  	struct task_group *tg;
10500  
10501  	/*
10502  	 * All callers are synchronized by task_rq_lock(); we do not use RCU
10503  	 * which is pointless here. Thus, we pass "true" to task_css_check()
10504  	 * to prevent lockdep warnings.
10505  	 */
10506  	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
10507  			  struct task_group, css);
10508  	tg = autogroup_task_group(tsk, tg);
10509  	tsk->sched_task_group = tg;
10510  
10511  #ifdef CONFIG_FAIR_GROUP_SCHED
10512  	if (tsk->sched_class->task_change_group)
10513  		tsk->sched_class->task_change_group(tsk);
10514  	else
10515  #endif
10516  		set_task_rq(tsk, task_cpu(tsk));
10517  }
10518  
10519  /*
10520   * Change task's runqueue when it moves between groups.
10521   *
10522   * The caller of this function should have put the task in its new group by
10523   * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10524   * its new group.
10525   */
sched_move_task(struct task_struct * tsk)10526  void sched_move_task(struct task_struct *tsk)
10527  {
10528  	int queued, running, queue_flags =
10529  		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
10530  	struct rq_flags rf;
10531  	struct rq *rq;
10532  
10533  	rq = task_rq_lock(tsk, &rf);
10534  	update_rq_clock(rq);
10535  
10536  	running = task_current(rq, tsk);
10537  	queued = task_on_rq_queued(tsk);
10538  
10539  	if (queued)
10540  		dequeue_task(rq, tsk, queue_flags);
10541  	if (running)
10542  		put_prev_task(rq, tsk);
10543  
10544  	sched_change_group(tsk);
10545  
10546  	if (queued)
10547  		enqueue_task(rq, tsk, queue_flags);
10548  	if (running) {
10549  		set_next_task(rq, tsk);
10550  		/*
10551  		 * After changing group, the running task may have joined a
10552  		 * throttled one but it's still the running task. Trigger a
10553  		 * resched to make sure that task can still run.
10554  		 */
10555  		resched_curr(rq);
10556  	}
10557  
10558  	task_rq_unlock(rq, tsk, &rf);
10559  }
10560  
css_tg(struct cgroup_subsys_state * css)10561  static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
10562  {
10563  	return css ? container_of(css, struct task_group, css) : NULL;
10564  }
10565  
10566  static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state * parent_css)10567  cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
10568  {
10569  	struct task_group *parent = css_tg(parent_css);
10570  	struct task_group *tg;
10571  
10572  	if (!parent) {
10573  		/* This is early initialization for the top cgroup */
10574  		return &root_task_group.css;
10575  	}
10576  
10577  	tg = sched_create_group(parent);
10578  	if (IS_ERR(tg))
10579  		return ERR_PTR(-ENOMEM);
10580  
10581  	return &tg->css;
10582  }
10583  
10584  /* Expose task group only after completing cgroup initialization */
cpu_cgroup_css_online(struct cgroup_subsys_state * css)10585  static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
10586  {
10587  	struct task_group *tg = css_tg(css);
10588  	struct task_group *parent = css_tg(css->parent);
10589  
10590  	if (parent)
10591  		sched_online_group(tg, parent);
10592  
10593  #ifdef CONFIG_UCLAMP_TASK_GROUP
10594  	/* Propagate the effective uclamp value for the new group */
10595  	mutex_lock(&uclamp_mutex);
10596  	rcu_read_lock();
10597  	cpu_util_update_eff(css);
10598  	rcu_read_unlock();
10599  	mutex_unlock(&uclamp_mutex);
10600  #endif
10601  
10602  	return 0;
10603  }
10604  
cpu_cgroup_css_released(struct cgroup_subsys_state * css)10605  static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
10606  {
10607  	struct task_group *tg = css_tg(css);
10608  
10609  	sched_release_group(tg);
10610  }
10611  
cpu_cgroup_css_free(struct cgroup_subsys_state * css)10612  static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
10613  {
10614  	struct task_group *tg = css_tg(css);
10615  
10616  	/*
10617  	 * Relies on the RCU grace period between css_released() and this.
10618  	 */
10619  	sched_unregister_group(tg);
10620  }
10621  
10622  #ifdef CONFIG_RT_GROUP_SCHED
cpu_cgroup_can_attach(struct cgroup_taskset * tset)10623  static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
10624  {
10625  	struct task_struct *task;
10626  	struct cgroup_subsys_state *css;
10627  
10628  	cgroup_taskset_for_each(task, css, tset) {
10629  		if (!sched_rt_can_attach(css_tg(css), task))
10630  			return -EINVAL;
10631  	}
10632  	return 0;
10633  }
10634  #endif
10635  
cpu_cgroup_attach(struct cgroup_taskset * tset)10636  static void cpu_cgroup_attach(struct cgroup_taskset *tset)
10637  {
10638  	struct task_struct *task;
10639  	struct cgroup_subsys_state *css;
10640  
10641  	cgroup_taskset_for_each(task, css, tset)
10642  		sched_move_task(task);
10643  }
10644  
10645  #ifdef CONFIG_UCLAMP_TASK_GROUP
cpu_util_update_eff(struct cgroup_subsys_state * css)10646  static void cpu_util_update_eff(struct cgroup_subsys_state *css)
10647  {
10648  	struct cgroup_subsys_state *top_css = css;
10649  	struct uclamp_se *uc_parent = NULL;
10650  	struct uclamp_se *uc_se = NULL;
10651  	unsigned int eff[UCLAMP_CNT];
10652  	enum uclamp_id clamp_id;
10653  	unsigned int clamps;
10654  
10655  	lockdep_assert_held(&uclamp_mutex);
10656  	SCHED_WARN_ON(!rcu_read_lock_held());
10657  
10658  	css_for_each_descendant_pre(css, top_css) {
10659  		uc_parent = css_tg(css)->parent
10660  			? css_tg(css)->parent->uclamp : NULL;
10661  
10662  		for_each_clamp_id(clamp_id) {
10663  			/* Assume effective clamps matches requested clamps */
10664  			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
10665  			/* Cap effective clamps with parent's effective clamps */
10666  			if (uc_parent &&
10667  			    eff[clamp_id] > uc_parent[clamp_id].value) {
10668  				eff[clamp_id] = uc_parent[clamp_id].value;
10669  			}
10670  		}
10671  		/* Ensure protection is always capped by limit */
10672  		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
10673  
10674  		/* Propagate most restrictive effective clamps */
10675  		clamps = 0x0;
10676  		uc_se = css_tg(css)->uclamp;
10677  		for_each_clamp_id(clamp_id) {
10678  			if (eff[clamp_id] == uc_se[clamp_id].value)
10679  				continue;
10680  			uc_se[clamp_id].value = eff[clamp_id];
10681  			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
10682  			clamps |= (0x1 << clamp_id);
10683  		}
10684  		if (!clamps) {
10685  			css = css_rightmost_descendant(css);
10686  			continue;
10687  		}
10688  
10689  		/* Immediately update descendants RUNNABLE tasks */
10690  		uclamp_update_active_tasks(css);
10691  	}
10692  }
10693  
10694  /*
10695   * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
10696   * C expression. Since there is no way to convert a macro argument (N) into a
10697   * character constant, use two levels of macros.
10698   */
10699  #define _POW10(exp) ((unsigned int)1e##exp)
10700  #define POW10(exp) _POW10(exp)
10701  
10702  struct uclamp_request {
10703  #define UCLAMP_PERCENT_SHIFT	2
10704  #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
10705  	s64 percent;
10706  	u64 util;
10707  	int ret;
10708  };
10709  
10710  static inline struct uclamp_request
capacity_from_percent(char * buf)10711  capacity_from_percent(char *buf)
10712  {
10713  	struct uclamp_request req = {
10714  		.percent = UCLAMP_PERCENT_SCALE,
10715  		.util = SCHED_CAPACITY_SCALE,
10716  		.ret = 0,
10717  	};
10718  
10719  	buf = strim(buf);
10720  	if (strcmp(buf, "max")) {
10721  		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
10722  					     &req.percent);
10723  		if (req.ret)
10724  			return req;
10725  		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
10726  			req.ret = -ERANGE;
10727  			return req;
10728  		}
10729  
10730  		req.util = req.percent << SCHED_CAPACITY_SHIFT;
10731  		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
10732  	}
10733  
10734  	return req;
10735  }
10736  
cpu_uclamp_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off,enum uclamp_id clamp_id)10737  static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
10738  				size_t nbytes, loff_t off,
10739  				enum uclamp_id clamp_id)
10740  {
10741  	struct uclamp_request req;
10742  	struct task_group *tg;
10743  
10744  	req = capacity_from_percent(buf);
10745  	if (req.ret)
10746  		return req.ret;
10747  
10748  	static_branch_enable(&sched_uclamp_used);
10749  
10750  	mutex_lock(&uclamp_mutex);
10751  	rcu_read_lock();
10752  
10753  	tg = css_tg(of_css(of));
10754  	if (tg->uclamp_req[clamp_id].value != req.util)
10755  		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
10756  
10757  	/*
10758  	 * Because of not recoverable conversion rounding we keep track of the
10759  	 * exact requested value
10760  	 */
10761  	tg->uclamp_pct[clamp_id] = req.percent;
10762  
10763  	/* Update effective clamps to track the most restrictive value */
10764  	cpu_util_update_eff(of_css(of));
10765  
10766  	rcu_read_unlock();
10767  	mutex_unlock(&uclamp_mutex);
10768  
10769  	return nbytes;
10770  }
10771  
cpu_uclamp_min_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)10772  static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
10773  				    char *buf, size_t nbytes,
10774  				    loff_t off)
10775  {
10776  	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
10777  }
10778  
cpu_uclamp_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)10779  static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
10780  				    char *buf, size_t nbytes,
10781  				    loff_t off)
10782  {
10783  	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
10784  }
10785  
cpu_uclamp_print(struct seq_file * sf,enum uclamp_id clamp_id)10786  static inline void cpu_uclamp_print(struct seq_file *sf,
10787  				    enum uclamp_id clamp_id)
10788  {
10789  	struct task_group *tg;
10790  	u64 util_clamp;
10791  	u64 percent;
10792  	u32 rem;
10793  
10794  	rcu_read_lock();
10795  	tg = css_tg(seq_css(sf));
10796  	util_clamp = tg->uclamp_req[clamp_id].value;
10797  	rcu_read_unlock();
10798  
10799  	if (util_clamp == SCHED_CAPACITY_SCALE) {
10800  		seq_puts(sf, "max\n");
10801  		return;
10802  	}
10803  
10804  	percent = tg->uclamp_pct[clamp_id];
10805  	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
10806  	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
10807  }
10808  
cpu_uclamp_min_show(struct seq_file * sf,void * v)10809  static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
10810  {
10811  	cpu_uclamp_print(sf, UCLAMP_MIN);
10812  	return 0;
10813  }
10814  
cpu_uclamp_max_show(struct seq_file * sf,void * v)10815  static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
10816  {
10817  	cpu_uclamp_print(sf, UCLAMP_MAX);
10818  	return 0;
10819  }
10820  #endif /* CONFIG_UCLAMP_TASK_GROUP */
10821  
10822  #ifdef CONFIG_FAIR_GROUP_SCHED
cpu_shares_write_u64(struct cgroup_subsys_state * css,struct cftype * cftype,u64 shareval)10823  static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
10824  				struct cftype *cftype, u64 shareval)
10825  {
10826  	if (shareval > scale_load_down(ULONG_MAX))
10827  		shareval = MAX_SHARES;
10828  	return sched_group_set_shares(css_tg(css), scale_load(shareval));
10829  }
10830  
cpu_shares_read_u64(struct cgroup_subsys_state * css,struct cftype * cft)10831  static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
10832  			       struct cftype *cft)
10833  {
10834  	struct task_group *tg = css_tg(css);
10835  
10836  	return (u64) scale_load_down(tg->shares);
10837  }
10838  
10839  #ifdef CONFIG_CFS_BANDWIDTH
10840  static DEFINE_MUTEX(cfs_constraints_mutex);
10841  
10842  const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
10843  static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
10844  /* More than 203 days if BW_SHIFT equals 20. */
10845  static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
10846  
10847  static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
10848  
tg_set_cfs_bandwidth(struct task_group * tg,u64 period,u64 quota,u64 burst)10849  static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
10850  				u64 burst)
10851  {
10852  	int i, ret = 0, runtime_enabled, runtime_was_enabled;
10853  	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10854  
10855  	if (tg == &root_task_group)
10856  		return -EINVAL;
10857  
10858  	/*
10859  	 * Ensure we have at some amount of bandwidth every period.  This is
10860  	 * to prevent reaching a state of large arrears when throttled via
10861  	 * entity_tick() resulting in prolonged exit starvation.
10862  	 */
10863  	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
10864  		return -EINVAL;
10865  
10866  	/*
10867  	 * Likewise, bound things on the other side by preventing insane quota
10868  	 * periods.  This also allows us to normalize in computing quota
10869  	 * feasibility.
10870  	 */
10871  	if (period > max_cfs_quota_period)
10872  		return -EINVAL;
10873  
10874  	/*
10875  	 * Bound quota to defend quota against overflow during bandwidth shift.
10876  	 */
10877  	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
10878  		return -EINVAL;
10879  
10880  	if (quota != RUNTIME_INF && (burst > quota ||
10881  				     burst + quota > max_cfs_runtime))
10882  		return -EINVAL;
10883  
10884  	/*
10885  	 * Prevent race between setting of cfs_rq->runtime_enabled and
10886  	 * unthrottle_offline_cfs_rqs().
10887  	 */
10888  	guard(cpus_read_lock)();
10889  	guard(mutex)(&cfs_constraints_mutex);
10890  
10891  	ret = __cfs_schedulable(tg, period, quota);
10892  	if (ret)
10893  		return ret;
10894  
10895  	runtime_enabled = quota != RUNTIME_INF;
10896  	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
10897  	/*
10898  	 * If we need to toggle cfs_bandwidth_used, off->on must occur
10899  	 * before making related changes, and on->off must occur afterwards
10900  	 */
10901  	if (runtime_enabled && !runtime_was_enabled)
10902  		cfs_bandwidth_usage_inc();
10903  
10904  	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
10905  		cfs_b->period = ns_to_ktime(period);
10906  		cfs_b->quota = quota;
10907  		cfs_b->burst = burst;
10908  
10909  		__refill_cfs_bandwidth_runtime(cfs_b);
10910  
10911  		/*
10912  		 * Restart the period timer (if active) to handle new
10913  		 * period expiry:
10914  		 */
10915  		if (runtime_enabled)
10916  			start_cfs_bandwidth(cfs_b);
10917  	}
10918  
10919  	for_each_online_cpu(i) {
10920  		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
10921  		struct rq *rq = cfs_rq->rq;
10922  
10923  		guard(rq_lock_irq)(rq);
10924  		cfs_rq->runtime_enabled = runtime_enabled;
10925  		cfs_rq->runtime_remaining = 0;
10926  
10927  		if (cfs_rq->throttled)
10928  			unthrottle_cfs_rq(cfs_rq);
10929  	}
10930  
10931  	if (runtime_was_enabled && !runtime_enabled)
10932  		cfs_bandwidth_usage_dec();
10933  
10934  	return 0;
10935  }
10936  
tg_set_cfs_quota(struct task_group * tg,long cfs_quota_us)10937  static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
10938  {
10939  	u64 quota, period, burst;
10940  
10941  	period = ktime_to_ns(tg->cfs_bandwidth.period);
10942  	burst = tg->cfs_bandwidth.burst;
10943  	if (cfs_quota_us < 0)
10944  		quota = RUNTIME_INF;
10945  	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
10946  		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
10947  	else
10948  		return -EINVAL;
10949  
10950  	return tg_set_cfs_bandwidth(tg, period, quota, burst);
10951  }
10952  
tg_get_cfs_quota(struct task_group * tg)10953  static long tg_get_cfs_quota(struct task_group *tg)
10954  {
10955  	u64 quota_us;
10956  
10957  	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
10958  		return -1;
10959  
10960  	quota_us = tg->cfs_bandwidth.quota;
10961  	do_div(quota_us, NSEC_PER_USEC);
10962  
10963  	return quota_us;
10964  }
10965  
tg_set_cfs_period(struct task_group * tg,long cfs_period_us)10966  static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
10967  {
10968  	u64 quota, period, burst;
10969  
10970  	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
10971  		return -EINVAL;
10972  
10973  	period = (u64)cfs_period_us * NSEC_PER_USEC;
10974  	quota = tg->cfs_bandwidth.quota;
10975  	burst = tg->cfs_bandwidth.burst;
10976  
10977  	return tg_set_cfs_bandwidth(tg, period, quota, burst);
10978  }
10979  
tg_get_cfs_period(struct task_group * tg)10980  static long tg_get_cfs_period(struct task_group *tg)
10981  {
10982  	u64 cfs_period_us;
10983  
10984  	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
10985  	do_div(cfs_period_us, NSEC_PER_USEC);
10986  
10987  	return cfs_period_us;
10988  }
10989  
tg_set_cfs_burst(struct task_group * tg,long cfs_burst_us)10990  static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
10991  {
10992  	u64 quota, period, burst;
10993  
10994  	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
10995  		return -EINVAL;
10996  
10997  	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
10998  	period = ktime_to_ns(tg->cfs_bandwidth.period);
10999  	quota = tg->cfs_bandwidth.quota;
11000  
11001  	return tg_set_cfs_bandwidth(tg, period, quota, burst);
11002  }
11003  
tg_get_cfs_burst(struct task_group * tg)11004  static long tg_get_cfs_burst(struct task_group *tg)
11005  {
11006  	u64 burst_us;
11007  
11008  	burst_us = tg->cfs_bandwidth.burst;
11009  	do_div(burst_us, NSEC_PER_USEC);
11010  
11011  	return burst_us;
11012  }
11013  
cpu_cfs_quota_read_s64(struct cgroup_subsys_state * css,struct cftype * cft)11014  static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
11015  				  struct cftype *cft)
11016  {
11017  	return tg_get_cfs_quota(css_tg(css));
11018  }
11019  
cpu_cfs_quota_write_s64(struct cgroup_subsys_state * css,struct cftype * cftype,s64 cfs_quota_us)11020  static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
11021  				   struct cftype *cftype, s64 cfs_quota_us)
11022  {
11023  	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
11024  }
11025  
cpu_cfs_period_read_u64(struct cgroup_subsys_state * css,struct cftype * cft)11026  static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
11027  				   struct cftype *cft)
11028  {
11029  	return tg_get_cfs_period(css_tg(css));
11030  }
11031  
cpu_cfs_period_write_u64(struct cgroup_subsys_state * css,struct cftype * cftype,u64 cfs_period_us)11032  static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
11033  				    struct cftype *cftype, u64 cfs_period_us)
11034  {
11035  	return tg_set_cfs_period(css_tg(css), cfs_period_us);
11036  }
11037  
cpu_cfs_burst_read_u64(struct cgroup_subsys_state * css,struct cftype * cft)11038  static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
11039  				  struct cftype *cft)
11040  {
11041  	return tg_get_cfs_burst(css_tg(css));
11042  }
11043  
cpu_cfs_burst_write_u64(struct cgroup_subsys_state * css,struct cftype * cftype,u64 cfs_burst_us)11044  static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
11045  				   struct cftype *cftype, u64 cfs_burst_us)
11046  {
11047  	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
11048  }
11049  
11050  struct cfs_schedulable_data {
11051  	struct task_group *tg;
11052  	u64 period, quota;
11053  };
11054  
11055  /*
11056   * normalize group quota/period to be quota/max_period
11057   * note: units are usecs
11058   */
normalize_cfs_quota(struct task_group * tg,struct cfs_schedulable_data * d)11059  static u64 normalize_cfs_quota(struct task_group *tg,
11060  			       struct cfs_schedulable_data *d)
11061  {
11062  	u64 quota, period;
11063  
11064  	if (tg == d->tg) {
11065  		period = d->period;
11066  		quota = d->quota;
11067  	} else {
11068  		period = tg_get_cfs_period(tg);
11069  		quota = tg_get_cfs_quota(tg);
11070  	}
11071  
11072  	/* note: these should typically be equivalent */
11073  	if (quota == RUNTIME_INF || quota == -1)
11074  		return RUNTIME_INF;
11075  
11076  	return to_ratio(period, quota);
11077  }
11078  
tg_cfs_schedulable_down(struct task_group * tg,void * data)11079  static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
11080  {
11081  	struct cfs_schedulable_data *d = data;
11082  	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11083  	s64 quota = 0, parent_quota = -1;
11084  
11085  	if (!tg->parent) {
11086  		quota = RUNTIME_INF;
11087  	} else {
11088  		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
11089  
11090  		quota = normalize_cfs_quota(tg, d);
11091  		parent_quota = parent_b->hierarchical_quota;
11092  
11093  		/*
11094  		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
11095  		 * always take the non-RUNTIME_INF min.  On cgroup1, only
11096  		 * inherit when no limit is set. In both cases this is used
11097  		 * by the scheduler to determine if a given CFS task has a
11098  		 * bandwidth constraint at some higher level.
11099  		 */
11100  		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
11101  			if (quota == RUNTIME_INF)
11102  				quota = parent_quota;
11103  			else if (parent_quota != RUNTIME_INF)
11104  				quota = min(quota, parent_quota);
11105  		} else {
11106  			if (quota == RUNTIME_INF)
11107  				quota = parent_quota;
11108  			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
11109  				return -EINVAL;
11110  		}
11111  	}
11112  	cfs_b->hierarchical_quota = quota;
11113  
11114  	return 0;
11115  }
11116  
__cfs_schedulable(struct task_group * tg,u64 period,u64 quota)11117  static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
11118  {
11119  	int ret;
11120  	struct cfs_schedulable_data data = {
11121  		.tg = tg,
11122  		.period = period,
11123  		.quota = quota,
11124  	};
11125  
11126  	if (quota != RUNTIME_INF) {
11127  		do_div(data.period, NSEC_PER_USEC);
11128  		do_div(data.quota, NSEC_PER_USEC);
11129  	}
11130  
11131  	rcu_read_lock();
11132  	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
11133  	rcu_read_unlock();
11134  
11135  	return ret;
11136  }
11137  
cpu_cfs_stat_show(struct seq_file * sf,void * v)11138  static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
11139  {
11140  	struct task_group *tg = css_tg(seq_css(sf));
11141  	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11142  
11143  	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
11144  	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
11145  	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
11146  
11147  	if (schedstat_enabled() && tg != &root_task_group) {
11148  		struct sched_statistics *stats;
11149  		u64 ws = 0;
11150  		int i;
11151  
11152  		for_each_possible_cpu(i) {
11153  			stats = __schedstats_from_se(tg->se[i]);
11154  			ws += schedstat_val(stats->wait_sum);
11155  		}
11156  
11157  		seq_printf(sf, "wait_sum %llu\n", ws);
11158  	}
11159  
11160  	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
11161  	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
11162  
11163  	return 0;
11164  }
11165  
throttled_time_self(struct task_group * tg)11166  static u64 throttled_time_self(struct task_group *tg)
11167  {
11168  	int i;
11169  	u64 total = 0;
11170  
11171  	for_each_possible_cpu(i) {
11172  		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
11173  	}
11174  
11175  	return total;
11176  }
11177  
cpu_cfs_local_stat_show(struct seq_file * sf,void * v)11178  static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
11179  {
11180  	struct task_group *tg = css_tg(seq_css(sf));
11181  
11182  	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
11183  
11184  	return 0;
11185  }
11186  #endif /* CONFIG_CFS_BANDWIDTH */
11187  #endif /* CONFIG_FAIR_GROUP_SCHED */
11188  
11189  #ifdef CONFIG_RT_GROUP_SCHED
cpu_rt_runtime_write(struct cgroup_subsys_state * css,struct cftype * cft,s64 val)11190  static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
11191  				struct cftype *cft, s64 val)
11192  {
11193  	return sched_group_set_rt_runtime(css_tg(css), val);
11194  }
11195  
cpu_rt_runtime_read(struct cgroup_subsys_state * css,struct cftype * cft)11196  static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
11197  			       struct cftype *cft)
11198  {
11199  	return sched_group_rt_runtime(css_tg(css));
11200  }
11201  
cpu_rt_period_write_uint(struct cgroup_subsys_state * css,struct cftype * cftype,u64 rt_period_us)11202  static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
11203  				    struct cftype *cftype, u64 rt_period_us)
11204  {
11205  	return sched_group_set_rt_period(css_tg(css), rt_period_us);
11206  }
11207  
cpu_rt_period_read_uint(struct cgroup_subsys_state * css,struct cftype * cft)11208  static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
11209  				   struct cftype *cft)
11210  {
11211  	return sched_group_rt_period(css_tg(css));
11212  }
11213  #endif /* CONFIG_RT_GROUP_SCHED */
11214  
11215  #ifdef CONFIG_FAIR_GROUP_SCHED
cpu_idle_read_s64(struct cgroup_subsys_state * css,struct cftype * cft)11216  static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
11217  			       struct cftype *cft)
11218  {
11219  	return css_tg(css)->idle;
11220  }
11221  
cpu_idle_write_s64(struct cgroup_subsys_state * css,struct cftype * cft,s64 idle)11222  static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
11223  				struct cftype *cft, s64 idle)
11224  {
11225  	return sched_group_set_idle(css_tg(css), idle);
11226  }
11227  #endif
11228  
11229  static struct cftype cpu_legacy_files[] = {
11230  #ifdef CONFIG_FAIR_GROUP_SCHED
11231  	{
11232  		.name = "shares",
11233  		.read_u64 = cpu_shares_read_u64,
11234  		.write_u64 = cpu_shares_write_u64,
11235  	},
11236  	{
11237  		.name = "idle",
11238  		.read_s64 = cpu_idle_read_s64,
11239  		.write_s64 = cpu_idle_write_s64,
11240  	},
11241  #endif
11242  #ifdef CONFIG_CFS_BANDWIDTH
11243  	{
11244  		.name = "cfs_quota_us",
11245  		.read_s64 = cpu_cfs_quota_read_s64,
11246  		.write_s64 = cpu_cfs_quota_write_s64,
11247  	},
11248  	{
11249  		.name = "cfs_period_us",
11250  		.read_u64 = cpu_cfs_period_read_u64,
11251  		.write_u64 = cpu_cfs_period_write_u64,
11252  	},
11253  	{
11254  		.name = "cfs_burst_us",
11255  		.read_u64 = cpu_cfs_burst_read_u64,
11256  		.write_u64 = cpu_cfs_burst_write_u64,
11257  	},
11258  	{
11259  		.name = "stat",
11260  		.seq_show = cpu_cfs_stat_show,
11261  	},
11262  	{
11263  		.name = "stat.local",
11264  		.seq_show = cpu_cfs_local_stat_show,
11265  	},
11266  #endif
11267  #ifdef CONFIG_RT_GROUP_SCHED
11268  	{
11269  		.name = "rt_runtime_us",
11270  		.read_s64 = cpu_rt_runtime_read,
11271  		.write_s64 = cpu_rt_runtime_write,
11272  	},
11273  	{
11274  		.name = "rt_period_us",
11275  		.read_u64 = cpu_rt_period_read_uint,
11276  		.write_u64 = cpu_rt_period_write_uint,
11277  	},
11278  #endif
11279  #ifdef CONFIG_UCLAMP_TASK_GROUP
11280  	{
11281  		.name = "uclamp.min",
11282  		.flags = CFTYPE_NOT_ON_ROOT,
11283  		.seq_show = cpu_uclamp_min_show,
11284  		.write = cpu_uclamp_min_write,
11285  	},
11286  	{
11287  		.name = "uclamp.max",
11288  		.flags = CFTYPE_NOT_ON_ROOT,
11289  		.seq_show = cpu_uclamp_max_show,
11290  		.write = cpu_uclamp_max_write,
11291  	},
11292  #endif
11293  	{ }	/* Terminate */
11294  };
11295  
cpu_extra_stat_show(struct seq_file * sf,struct cgroup_subsys_state * css)11296  static int cpu_extra_stat_show(struct seq_file *sf,
11297  			       struct cgroup_subsys_state *css)
11298  {
11299  #ifdef CONFIG_CFS_BANDWIDTH
11300  	{
11301  		struct task_group *tg = css_tg(css);
11302  		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11303  		u64 throttled_usec, burst_usec;
11304  
11305  		throttled_usec = cfs_b->throttled_time;
11306  		do_div(throttled_usec, NSEC_PER_USEC);
11307  		burst_usec = cfs_b->burst_time;
11308  		do_div(burst_usec, NSEC_PER_USEC);
11309  
11310  		seq_printf(sf, "nr_periods %d\n"
11311  			   "nr_throttled %d\n"
11312  			   "throttled_usec %llu\n"
11313  			   "nr_bursts %d\n"
11314  			   "burst_usec %llu\n",
11315  			   cfs_b->nr_periods, cfs_b->nr_throttled,
11316  			   throttled_usec, cfs_b->nr_burst, burst_usec);
11317  	}
11318  #endif
11319  	return 0;
11320  }
11321  
cpu_local_stat_show(struct seq_file * sf,struct cgroup_subsys_state * css)11322  static int cpu_local_stat_show(struct seq_file *sf,
11323  			       struct cgroup_subsys_state *css)
11324  {
11325  #ifdef CONFIG_CFS_BANDWIDTH
11326  	{
11327  		struct task_group *tg = css_tg(css);
11328  		u64 throttled_self_usec;
11329  
11330  		throttled_self_usec = throttled_time_self(tg);
11331  		do_div(throttled_self_usec, NSEC_PER_USEC);
11332  
11333  		seq_printf(sf, "throttled_usec %llu\n",
11334  			   throttled_self_usec);
11335  	}
11336  #endif
11337  	return 0;
11338  }
11339  
11340  #ifdef CONFIG_FAIR_GROUP_SCHED
cpu_weight_read_u64(struct cgroup_subsys_state * css,struct cftype * cft)11341  static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
11342  			       struct cftype *cft)
11343  {
11344  	struct task_group *tg = css_tg(css);
11345  	u64 weight = scale_load_down(tg->shares);
11346  
11347  	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
11348  }
11349  
cpu_weight_write_u64(struct cgroup_subsys_state * css,struct cftype * cft,u64 weight)11350  static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
11351  				struct cftype *cft, u64 weight)
11352  {
11353  	/*
11354  	 * cgroup weight knobs should use the common MIN, DFL and MAX
11355  	 * values which are 1, 100 and 10000 respectively.  While it loses
11356  	 * a bit of range on both ends, it maps pretty well onto the shares
11357  	 * value used by scheduler and the round-trip conversions preserve
11358  	 * the original value over the entire range.
11359  	 */
11360  	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
11361  		return -ERANGE;
11362  
11363  	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
11364  
11365  	return sched_group_set_shares(css_tg(css), scale_load(weight));
11366  }
11367  
cpu_weight_nice_read_s64(struct cgroup_subsys_state * css,struct cftype * cft)11368  static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
11369  				    struct cftype *cft)
11370  {
11371  	unsigned long weight = scale_load_down(css_tg(css)->shares);
11372  	int last_delta = INT_MAX;
11373  	int prio, delta;
11374  
11375  	/* find the closest nice value to the current weight */
11376  	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
11377  		delta = abs(sched_prio_to_weight[prio] - weight);
11378  		if (delta >= last_delta)
11379  			break;
11380  		last_delta = delta;
11381  	}
11382  
11383  	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
11384  }
11385  
cpu_weight_nice_write_s64(struct cgroup_subsys_state * css,struct cftype * cft,s64 nice)11386  static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
11387  				     struct cftype *cft, s64 nice)
11388  {
11389  	unsigned long weight;
11390  	int idx;
11391  
11392  	if (nice < MIN_NICE || nice > MAX_NICE)
11393  		return -ERANGE;
11394  
11395  	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
11396  	idx = array_index_nospec(idx, 40);
11397  	weight = sched_prio_to_weight[idx];
11398  
11399  	return sched_group_set_shares(css_tg(css), scale_load(weight));
11400  }
11401  #endif
11402  
cpu_period_quota_print(struct seq_file * sf,long period,long quota)11403  static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
11404  						  long period, long quota)
11405  {
11406  	if (quota < 0)
11407  		seq_puts(sf, "max");
11408  	else
11409  		seq_printf(sf, "%ld", quota);
11410  
11411  	seq_printf(sf, " %ld\n", period);
11412  }
11413  
11414  /* caller should put the current value in *@periodp before calling */
cpu_period_quota_parse(char * buf,u64 * periodp,u64 * quotap)11415  static int __maybe_unused cpu_period_quota_parse(char *buf,
11416  						 u64 *periodp, u64 *quotap)
11417  {
11418  	char tok[21];	/* U64_MAX */
11419  
11420  	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
11421  		return -EINVAL;
11422  
11423  	*periodp *= NSEC_PER_USEC;
11424  
11425  	if (sscanf(tok, "%llu", quotap))
11426  		*quotap *= NSEC_PER_USEC;
11427  	else if (!strcmp(tok, "max"))
11428  		*quotap = RUNTIME_INF;
11429  	else
11430  		return -EINVAL;
11431  
11432  	return 0;
11433  }
11434  
11435  #ifdef CONFIG_CFS_BANDWIDTH
cpu_max_show(struct seq_file * sf,void * v)11436  static int cpu_max_show(struct seq_file *sf, void *v)
11437  {
11438  	struct task_group *tg = css_tg(seq_css(sf));
11439  
11440  	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
11441  	return 0;
11442  }
11443  
cpu_max_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)11444  static ssize_t cpu_max_write(struct kernfs_open_file *of,
11445  			     char *buf, size_t nbytes, loff_t off)
11446  {
11447  	struct task_group *tg = css_tg(of_css(of));
11448  	u64 period = tg_get_cfs_period(tg);
11449  	u64 burst = tg->cfs_bandwidth.burst;
11450  	u64 quota;
11451  	int ret;
11452  
11453  	ret = cpu_period_quota_parse(buf, &period, &quota);
11454  	if (!ret)
11455  		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
11456  	return ret ?: nbytes;
11457  }
11458  #endif
11459  
11460  static struct cftype cpu_files[] = {
11461  #ifdef CONFIG_FAIR_GROUP_SCHED
11462  	{
11463  		.name = "weight",
11464  		.flags = CFTYPE_NOT_ON_ROOT,
11465  		.read_u64 = cpu_weight_read_u64,
11466  		.write_u64 = cpu_weight_write_u64,
11467  	},
11468  	{
11469  		.name = "weight.nice",
11470  		.flags = CFTYPE_NOT_ON_ROOT,
11471  		.read_s64 = cpu_weight_nice_read_s64,
11472  		.write_s64 = cpu_weight_nice_write_s64,
11473  	},
11474  	{
11475  		.name = "idle",
11476  		.flags = CFTYPE_NOT_ON_ROOT,
11477  		.read_s64 = cpu_idle_read_s64,
11478  		.write_s64 = cpu_idle_write_s64,
11479  	},
11480  #endif
11481  #ifdef CONFIG_CFS_BANDWIDTH
11482  	{
11483  		.name = "max",
11484  		.flags = CFTYPE_NOT_ON_ROOT,
11485  		.seq_show = cpu_max_show,
11486  		.write = cpu_max_write,
11487  	},
11488  	{
11489  		.name = "max.burst",
11490  		.flags = CFTYPE_NOT_ON_ROOT,
11491  		.read_u64 = cpu_cfs_burst_read_u64,
11492  		.write_u64 = cpu_cfs_burst_write_u64,
11493  	},
11494  #endif
11495  #ifdef CONFIG_UCLAMP_TASK_GROUP
11496  	{
11497  		.name = "uclamp.min",
11498  		.flags = CFTYPE_NOT_ON_ROOT,
11499  		.seq_show = cpu_uclamp_min_show,
11500  		.write = cpu_uclamp_min_write,
11501  	},
11502  	{
11503  		.name = "uclamp.max",
11504  		.flags = CFTYPE_NOT_ON_ROOT,
11505  		.seq_show = cpu_uclamp_max_show,
11506  		.write = cpu_uclamp_max_write,
11507  	},
11508  #endif
11509  	{ }	/* terminate */
11510  };
11511  
11512  struct cgroup_subsys cpu_cgrp_subsys = {
11513  	.css_alloc	= cpu_cgroup_css_alloc,
11514  	.css_online	= cpu_cgroup_css_online,
11515  	.css_released	= cpu_cgroup_css_released,
11516  	.css_free	= cpu_cgroup_css_free,
11517  	.css_extra_stat_show = cpu_extra_stat_show,
11518  	.css_local_stat_show = cpu_local_stat_show,
11519  #ifdef CONFIG_RT_GROUP_SCHED
11520  	.can_attach	= cpu_cgroup_can_attach,
11521  #endif
11522  	.attach		= cpu_cgroup_attach,
11523  	.legacy_cftypes	= cpu_legacy_files,
11524  	.dfl_cftypes	= cpu_files,
11525  	.early_init	= true,
11526  	.threaded	= true,
11527  };
11528  
11529  #endif	/* CONFIG_CGROUP_SCHED */
11530  
dump_cpu_task(int cpu)11531  void dump_cpu_task(int cpu)
11532  {
11533  	if (cpu == smp_processor_id() && in_hardirq()) {
11534  		struct pt_regs *regs;
11535  
11536  		regs = get_irq_regs();
11537  		if (regs) {
11538  			show_regs(regs);
11539  			return;
11540  		}
11541  	}
11542  
11543  	if (trigger_single_cpu_backtrace(cpu))
11544  		return;
11545  
11546  	pr_info("Task dump for CPU %d:\n", cpu);
11547  	sched_show_task(cpu_curr(cpu));
11548  }
11549  
11550  /*
11551   * Nice levels are multiplicative, with a gentle 10% change for every
11552   * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
11553   * nice 1, it will get ~10% less CPU time than another CPU-bound task
11554   * that remained on nice 0.
11555   *
11556   * The "10% effect" is relative and cumulative: from _any_ nice level,
11557   * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
11558   * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
11559   * If a task goes up by ~10% and another task goes down by ~10% then
11560   * the relative distance between them is ~25%.)
11561   */
11562  const int sched_prio_to_weight[40] = {
11563   /* -20 */     88761,     71755,     56483,     46273,     36291,
11564   /* -15 */     29154,     23254,     18705,     14949,     11916,
11565   /* -10 */      9548,      7620,      6100,      4904,      3906,
11566   /*  -5 */      3121,      2501,      1991,      1586,      1277,
11567   /*   0 */      1024,       820,       655,       526,       423,
11568   /*   5 */       335,       272,       215,       172,       137,
11569   /*  10 */       110,        87,        70,        56,        45,
11570   /*  15 */        36,        29,        23,        18,        15,
11571  };
11572  
11573  /*
11574   * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
11575   *
11576   * In cases where the weight does not change often, we can use the
11577   * precalculated inverse to speed up arithmetics by turning divisions
11578   * into multiplications:
11579   */
11580  const u32 sched_prio_to_wmult[40] = {
11581   /* -20 */     48388,     59856,     76040,     92818,    118348,
11582   /* -15 */    147320,    184698,    229616,    287308,    360437,
11583   /* -10 */    449829,    563644,    704093,    875809,   1099582,
11584   /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
11585   /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
11586   /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
11587   /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
11588   /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
11589  };
11590  
call_trace_sched_update_nr_running(struct rq * rq,int count)11591  void call_trace_sched_update_nr_running(struct rq *rq, int count)
11592  {
11593          trace_sched_update_nr_running_tp(rq, count);
11594  }
11595  
11596  #ifdef CONFIG_SCHED_MM_CID
11597  
11598  /*
11599   * @cid_lock: Guarantee forward-progress of cid allocation.
11600   *
11601   * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
11602   * is only used when contention is detected by the lock-free allocation so
11603   * forward progress can be guaranteed.
11604   */
11605  DEFINE_RAW_SPINLOCK(cid_lock);
11606  
11607  /*
11608   * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
11609   *
11610   * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
11611   * detected, it is set to 1 to ensure that all newly coming allocations are
11612   * serialized by @cid_lock until the allocation which detected contention
11613   * completes and sets @use_cid_lock back to 0. This guarantees forward progress
11614   * of a cid allocation.
11615   */
11616  int use_cid_lock;
11617  
11618  /*
11619   * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
11620   * concurrently with respect to the execution of the source runqueue context
11621   * switch.
11622   *
11623   * There is one basic properties we want to guarantee here:
11624   *
11625   * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
11626   * used by a task. That would lead to concurrent allocation of the cid and
11627   * userspace corruption.
11628   *
11629   * Provide this guarantee by introducing a Dekker memory ordering to guarantee
11630   * that a pair of loads observe at least one of a pair of stores, which can be
11631   * shown as:
11632   *
11633   *      X = Y = 0
11634   *
11635   *      w[X]=1          w[Y]=1
11636   *      MB              MB
11637   *      r[Y]=y          r[X]=x
11638   *
11639   * Which guarantees that x==0 && y==0 is impossible. But rather than using
11640   * values 0 and 1, this algorithm cares about specific state transitions of the
11641   * runqueue current task (as updated by the scheduler context switch), and the
11642   * per-mm/cpu cid value.
11643   *
11644   * Let's introduce task (Y) which has task->mm == mm and task (N) which has
11645   * task->mm != mm for the rest of the discussion. There are two scheduler state
11646   * transitions on context switch we care about:
11647   *
11648   * (TSA) Store to rq->curr with transition from (N) to (Y)
11649   *
11650   * (TSB) Store to rq->curr with transition from (Y) to (N)
11651   *
11652   * On the remote-clear side, there is one transition we care about:
11653   *
11654   * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
11655   *
11656   * There is also a transition to UNSET state which can be performed from all
11657   * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
11658   * guarantees that only a single thread will succeed:
11659   *
11660   * (TMB) cmpxchg to *pcpu_cid to mark UNSET
11661   *
11662   * Just to be clear, what we do _not_ want to happen is a transition to UNSET
11663   * when a thread is actively using the cid (property (1)).
11664   *
11665   * Let's looks at the relevant combinations of TSA/TSB, and TMA transitions.
11666   *
11667   * Scenario A) (TSA)+(TMA) (from next task perspective)
11668   *
11669   * CPU0                                      CPU1
11670   *
11671   * Context switch CS-1                       Remote-clear
11672   *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
11673   *                                             (implied barrier after cmpxchg)
11674   *   - switch_mm_cid()
11675   *     - memory barrier (see switch_mm_cid()
11676   *       comment explaining how this barrier
11677   *       is combined with other scheduler
11678   *       barriers)
11679   *     - mm_cid_get (next)
11680   *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
11681   *
11682   * This Dekker ensures that either task (Y) is observed by the
11683   * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
11684   * observed.
11685   *
11686   * If task (Y) store is observed by rcu_dereference(), it means that there is
11687   * still an active task on the cpu. Remote-clear will therefore not transition
11688   * to UNSET, which fulfills property (1).
11689   *
11690   * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
11691   * it will move its state to UNSET, which clears the percpu cid perhaps
11692   * uselessly (which is not an issue for correctness). Because task (Y) is not
11693   * observed, CPU1 can move ahead to set the state to UNSET. Because moving
11694   * state to UNSET is done with a cmpxchg expecting that the old state has the
11695   * LAZY flag set, only one thread will successfully UNSET.
11696   *
11697   * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
11698   * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
11699   * CPU1 will observe task (Y) and do nothing more, which is fine.
11700   *
11701   * What we are effectively preventing with this Dekker is a scenario where
11702   * neither LAZY flag nor store (Y) are observed, which would fail property (1)
11703   * because this would UNSET a cid which is actively used.
11704   */
11705  
sched_mm_cid_migrate_from(struct task_struct * t)11706  void sched_mm_cid_migrate_from(struct task_struct *t)
11707  {
11708  	t->migrate_from_cpu = task_cpu(t);
11709  }
11710  
11711  static
__sched_mm_cid_migrate_from_fetch_cid(struct rq * src_rq,struct task_struct * t,struct mm_cid * src_pcpu_cid)11712  int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
11713  					  struct task_struct *t,
11714  					  struct mm_cid *src_pcpu_cid)
11715  {
11716  	struct mm_struct *mm = t->mm;
11717  	struct task_struct *src_task;
11718  	int src_cid, last_mm_cid;
11719  
11720  	if (!mm)
11721  		return -1;
11722  
11723  	last_mm_cid = t->last_mm_cid;
11724  	/*
11725  	 * If the migrated task has no last cid, or if the current
11726  	 * task on src rq uses the cid, it means the source cid does not need
11727  	 * to be moved to the destination cpu.
11728  	 */
11729  	if (last_mm_cid == -1)
11730  		return -1;
11731  	src_cid = READ_ONCE(src_pcpu_cid->cid);
11732  	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
11733  		return -1;
11734  
11735  	/*
11736  	 * If we observe an active task using the mm on this rq, it means we
11737  	 * are not the last task to be migrated from this cpu for this mm, so
11738  	 * there is no need to move src_cid to the destination cpu.
11739  	 */
11740  	rcu_read_lock();
11741  	src_task = rcu_dereference(src_rq->curr);
11742  	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11743  		rcu_read_unlock();
11744  		t->last_mm_cid = -1;
11745  		return -1;
11746  	}
11747  	rcu_read_unlock();
11748  
11749  	return src_cid;
11750  }
11751  
11752  static
__sched_mm_cid_migrate_from_try_steal_cid(struct rq * src_rq,struct task_struct * t,struct mm_cid * src_pcpu_cid,int src_cid)11753  int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
11754  					      struct task_struct *t,
11755  					      struct mm_cid *src_pcpu_cid,
11756  					      int src_cid)
11757  {
11758  	struct task_struct *src_task;
11759  	struct mm_struct *mm = t->mm;
11760  	int lazy_cid;
11761  
11762  	if (src_cid == -1)
11763  		return -1;
11764  
11765  	/*
11766  	 * Attempt to clear the source cpu cid to move it to the destination
11767  	 * cpu.
11768  	 */
11769  	lazy_cid = mm_cid_set_lazy_put(src_cid);
11770  	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
11771  		return -1;
11772  
11773  	/*
11774  	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11775  	 * rq->curr->mm matches the scheduler barrier in context_switch()
11776  	 * between store to rq->curr and load of prev and next task's
11777  	 * per-mm/cpu cid.
11778  	 *
11779  	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11780  	 * rq->curr->mm_cid_active matches the barrier in
11781  	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
11782  	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11783  	 * load of per-mm/cpu cid.
11784  	 */
11785  
11786  	/*
11787  	 * If we observe an active task using the mm on this rq after setting
11788  	 * the lazy-put flag, this task will be responsible for transitioning
11789  	 * from lazy-put flag set to MM_CID_UNSET.
11790  	 */
11791  	rcu_read_lock();
11792  	src_task = rcu_dereference(src_rq->curr);
11793  	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11794  		rcu_read_unlock();
11795  		/*
11796  		 * We observed an active task for this mm, there is therefore
11797  		 * no point in moving this cid to the destination cpu.
11798  		 */
11799  		t->last_mm_cid = -1;
11800  		return -1;
11801  	}
11802  	rcu_read_unlock();
11803  
11804  	/*
11805  	 * The src_cid is unused, so it can be unset.
11806  	 */
11807  	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11808  		return -1;
11809  	return src_cid;
11810  }
11811  
11812  /*
11813   * Migration to dst cpu. Called with dst_rq lock held.
11814   * Interrupts are disabled, which keeps the window of cid ownership without the
11815   * source rq lock held small.
11816   */
sched_mm_cid_migrate_to(struct rq * dst_rq,struct task_struct * t)11817  void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
11818  {
11819  	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
11820  	struct mm_struct *mm = t->mm;
11821  	int src_cid, dst_cid, src_cpu;
11822  	struct rq *src_rq;
11823  
11824  	lockdep_assert_rq_held(dst_rq);
11825  
11826  	if (!mm)
11827  		return;
11828  	src_cpu = t->migrate_from_cpu;
11829  	if (src_cpu == -1) {
11830  		t->last_mm_cid = -1;
11831  		return;
11832  	}
11833  	/*
11834  	 * Move the src cid if the dst cid is unset. This keeps id
11835  	 * allocation closest to 0 in cases where few threads migrate around
11836  	 * many cpus.
11837  	 *
11838  	 * If destination cid is already set, we may have to just clear
11839  	 * the src cid to ensure compactness in frequent migrations
11840  	 * scenarios.
11841  	 *
11842  	 * It is not useful to clear the src cid when the number of threads is
11843  	 * greater or equal to the number of allowed cpus, because user-space
11844  	 * can expect that the number of allowed cids can reach the number of
11845  	 * allowed cpus.
11846  	 */
11847  	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
11848  	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
11849  	if (!mm_cid_is_unset(dst_cid) &&
11850  	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
11851  		return;
11852  	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
11853  	src_rq = cpu_rq(src_cpu);
11854  	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
11855  	if (src_cid == -1)
11856  		return;
11857  	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
11858  							    src_cid);
11859  	if (src_cid == -1)
11860  		return;
11861  	if (!mm_cid_is_unset(dst_cid)) {
11862  		__mm_cid_put(mm, src_cid);
11863  		return;
11864  	}
11865  	/* Move src_cid to dst cpu. */
11866  	mm_cid_snapshot_time(dst_rq, mm);
11867  	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
11868  }
11869  
sched_mm_cid_remote_clear(struct mm_struct * mm,struct mm_cid * pcpu_cid,int cpu)11870  static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
11871  				      int cpu)
11872  {
11873  	struct rq *rq = cpu_rq(cpu);
11874  	struct task_struct *t;
11875  	unsigned long flags;
11876  	int cid, lazy_cid;
11877  
11878  	cid = READ_ONCE(pcpu_cid->cid);
11879  	if (!mm_cid_is_valid(cid))
11880  		return;
11881  
11882  	/*
11883  	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
11884  	 * there happens to be other tasks left on the source cpu using this
11885  	 * mm, the next task using this mm will reallocate its cid on context
11886  	 * switch.
11887  	 */
11888  	lazy_cid = mm_cid_set_lazy_put(cid);
11889  	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
11890  		return;
11891  
11892  	/*
11893  	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11894  	 * rq->curr->mm matches the scheduler barrier in context_switch()
11895  	 * between store to rq->curr and load of prev and next task's
11896  	 * per-mm/cpu cid.
11897  	 *
11898  	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11899  	 * rq->curr->mm_cid_active matches the barrier in
11900  	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
11901  	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11902  	 * load of per-mm/cpu cid.
11903  	 */
11904  
11905  	/*
11906  	 * If we observe an active task using the mm on this rq after setting
11907  	 * the lazy-put flag, that task will be responsible for transitioning
11908  	 * from lazy-put flag set to MM_CID_UNSET.
11909  	 */
11910  	rcu_read_lock();
11911  	t = rcu_dereference(rq->curr);
11912  	if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
11913  		rcu_read_unlock();
11914  		return;
11915  	}
11916  	rcu_read_unlock();
11917  
11918  	/*
11919  	 * The cid is unused, so it can be unset.
11920  	 * Disable interrupts to keep the window of cid ownership without rq
11921  	 * lock small.
11922  	 */
11923  	local_irq_save(flags);
11924  	if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11925  		__mm_cid_put(mm, cid);
11926  	local_irq_restore(flags);
11927  }
11928  
sched_mm_cid_remote_clear_old(struct mm_struct * mm,int cpu)11929  static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
11930  {
11931  	struct rq *rq = cpu_rq(cpu);
11932  	struct mm_cid *pcpu_cid;
11933  	struct task_struct *curr;
11934  	u64 rq_clock;
11935  
11936  	/*
11937  	 * rq->clock load is racy on 32-bit but one spurious clear once in a
11938  	 * while is irrelevant.
11939  	 */
11940  	rq_clock = READ_ONCE(rq->clock);
11941  	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11942  
11943  	/*
11944  	 * In order to take care of infrequently scheduled tasks, bump the time
11945  	 * snapshot associated with this cid if an active task using the mm is
11946  	 * observed on this rq.
11947  	 */
11948  	rcu_read_lock();
11949  	curr = rcu_dereference(rq->curr);
11950  	if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
11951  		WRITE_ONCE(pcpu_cid->time, rq_clock);
11952  		rcu_read_unlock();
11953  		return;
11954  	}
11955  	rcu_read_unlock();
11956  
11957  	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
11958  		return;
11959  	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
11960  }
11961  
sched_mm_cid_remote_clear_weight(struct mm_struct * mm,int cpu,int weight)11962  static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
11963  					     int weight)
11964  {
11965  	struct mm_cid *pcpu_cid;
11966  	int cid;
11967  
11968  	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11969  	cid = READ_ONCE(pcpu_cid->cid);
11970  	if (!mm_cid_is_valid(cid) || cid < weight)
11971  		return;
11972  	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
11973  }
11974  
task_mm_cid_work(struct callback_head * work)11975  static void task_mm_cid_work(struct callback_head *work)
11976  {
11977  	unsigned long now = jiffies, old_scan, next_scan;
11978  	struct task_struct *t = current;
11979  	struct cpumask *cidmask;
11980  	struct mm_struct *mm;
11981  	int weight, cpu;
11982  
11983  	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
11984  
11985  	work->next = work;	/* Prevent double-add */
11986  	if (t->flags & PF_EXITING)
11987  		return;
11988  	mm = t->mm;
11989  	if (!mm)
11990  		return;
11991  	old_scan = READ_ONCE(mm->mm_cid_next_scan);
11992  	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
11993  	if (!old_scan) {
11994  		unsigned long res;
11995  
11996  		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
11997  		if (res != old_scan)
11998  			old_scan = res;
11999  		else
12000  			old_scan = next_scan;
12001  	}
12002  	if (time_before(now, old_scan))
12003  		return;
12004  	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
12005  		return;
12006  	cidmask = mm_cidmask(mm);
12007  	/* Clear cids that were not recently used. */
12008  	for_each_possible_cpu(cpu)
12009  		sched_mm_cid_remote_clear_old(mm, cpu);
12010  	weight = cpumask_weight(cidmask);
12011  	/*
12012  	 * Clear cids that are greater or equal to the cidmask weight to
12013  	 * recompact it.
12014  	 */
12015  	for_each_possible_cpu(cpu)
12016  		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
12017  }
12018  
init_sched_mm_cid(struct task_struct * t)12019  void init_sched_mm_cid(struct task_struct *t)
12020  {
12021  	struct mm_struct *mm = t->mm;
12022  	int mm_users = 0;
12023  
12024  	if (mm) {
12025  		mm_users = atomic_read(&mm->mm_users);
12026  		if (mm_users == 1)
12027  			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
12028  	}
12029  	t->cid_work.next = &t->cid_work;	/* Protect against double add */
12030  	init_task_work(&t->cid_work, task_mm_cid_work);
12031  }
12032  
task_tick_mm_cid(struct rq * rq,struct task_struct * curr)12033  void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
12034  {
12035  	struct callback_head *work = &curr->cid_work;
12036  	unsigned long now = jiffies;
12037  
12038  	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
12039  	    work->next != work)
12040  		return;
12041  	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
12042  		return;
12043  
12044  	/* No page allocation under rq lock */
12045  	task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
12046  }
12047  
sched_mm_cid_exit_signals(struct task_struct * t)12048  void sched_mm_cid_exit_signals(struct task_struct *t)
12049  {
12050  	struct mm_struct *mm = t->mm;
12051  	struct rq_flags rf;
12052  	struct rq *rq;
12053  
12054  	if (!mm)
12055  		return;
12056  
12057  	preempt_disable();
12058  	rq = this_rq();
12059  	rq_lock_irqsave(rq, &rf);
12060  	preempt_enable_no_resched();	/* holding spinlock */
12061  	WRITE_ONCE(t->mm_cid_active, 0);
12062  	/*
12063  	 * Store t->mm_cid_active before loading per-mm/cpu cid.
12064  	 * Matches barrier in sched_mm_cid_remote_clear_old().
12065  	 */
12066  	smp_mb();
12067  	mm_cid_put(mm);
12068  	t->last_mm_cid = t->mm_cid = -1;
12069  	rq_unlock_irqrestore(rq, &rf);
12070  }
12071  
sched_mm_cid_before_execve(struct task_struct * t)12072  void sched_mm_cid_before_execve(struct task_struct *t)
12073  {
12074  	struct mm_struct *mm = t->mm;
12075  	struct rq_flags rf;
12076  	struct rq *rq;
12077  
12078  	if (!mm)
12079  		return;
12080  
12081  	preempt_disable();
12082  	rq = this_rq();
12083  	rq_lock_irqsave(rq, &rf);
12084  	preempt_enable_no_resched();	/* holding spinlock */
12085  	WRITE_ONCE(t->mm_cid_active, 0);
12086  	/*
12087  	 * Store t->mm_cid_active before loading per-mm/cpu cid.
12088  	 * Matches barrier in sched_mm_cid_remote_clear_old().
12089  	 */
12090  	smp_mb();
12091  	mm_cid_put(mm);
12092  	t->last_mm_cid = t->mm_cid = -1;
12093  	rq_unlock_irqrestore(rq, &rf);
12094  }
12095  
sched_mm_cid_after_execve(struct task_struct * t)12096  void sched_mm_cid_after_execve(struct task_struct *t)
12097  {
12098  	struct mm_struct *mm = t->mm;
12099  	struct rq_flags rf;
12100  	struct rq *rq;
12101  
12102  	if (!mm)
12103  		return;
12104  
12105  	preempt_disable();
12106  	rq = this_rq();
12107  	rq_lock_irqsave(rq, &rf);
12108  	preempt_enable_no_resched();	/* holding spinlock */
12109  	WRITE_ONCE(t->mm_cid_active, 1);
12110  	/*
12111  	 * Store t->mm_cid_active before loading per-mm/cpu cid.
12112  	 * Matches barrier in sched_mm_cid_remote_clear_old().
12113  	 */
12114  	smp_mb();
12115  	t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
12116  	rq_unlock_irqrestore(rq, &rf);
12117  	rseq_set_notify_resume(t);
12118  }
12119  
sched_mm_cid_fork(struct task_struct * t)12120  void sched_mm_cid_fork(struct task_struct *t)
12121  {
12122  	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
12123  	t->mm_cid_active = 1;
12124  }
12125  #endif
12126