
1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1991-2002 Linus Torvalds
70 # include <linux/entry-common.h>
96 #include "../../io_uring/io-wq.h"
161 if (p->sched_class == &stop_sched_class) /* trumps deadline */
162 return -2;
164 if (rt_prio(p->prio)) /* includes deadline */
165 return p->prio; /* [-1, 99] */
167 if (p->sched_class == &idle_sched_class)
187 if (-pa < -pb)
190 if (-pb < -pa)
193 if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
194 return !dl_time_before(a->dl.deadline, b->dl.deadline);
205 if (a->core_cookie < b->core_cookie)
208 if (a->core_cookie > b->core_cookie)
212 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
230 if (cookie < p->core_cookie)
231 return -1;
233 if (cookie > p->core_cookie)
241 rq->core->core_task_seq++;
243 if (!p->core_cookie)
246 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
251 rq->core->core_task_seq++;
254 rb_erase(&p->core_node, &rq->core_tree);
255 RB_CLEAR_NODE(&p->core_node);
261 * and re-examine whether the core is still in forced idle state.
263 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
264 rq->core->core_forceidle_count && rq->curr == rq->idle)
270 if (p->sched_class->task_is_throttled)
271 return p->sched_class->task_is_throttled(p, cpu);
278 struct rb_node *node = &p->core_node;
287 if (p->core_cookie != cookie)
296 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
304 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
309 if (!sched_task_is_throttled(p, rq->cpu))
339 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
348 raw_spin_unlock(&cpu_rq(t)->__lock);
369 cpu_rq(t)->core_enabled = enabled;
371 cpu_rq(cpu)->core->core_forceidle_start = 0;
382 cpu_rq(cpu)->core_enabled = enabled;
392 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
447 if (!atomic_add_unless(&sched_core_count, -1, 1))
464 * p->pi_lock
465 * rq->lock
466 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
468 * rq1->lock
469 * rq2->lock where: rq1 < rq2
473 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
474 * local CPU's rq->lock, it optionally removes the task from the runqueue and
478 * Task enqueue is also under rq->lock, possibly taken from another CPU.
484 * complicated to avoid having to take two rq->locks.
488 * System-calls and anything external will use task_rq_lock() which acquires
489 * both p->pi_lock and rq->lock. As a consequence the state they change is
492 * - sched_setaffinity()/
493 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
494 * - set_user_nice(): p->se.load, p->*prio
495 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
496 * p->se.load, p->rt_priority,
497 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
498 * - sched_setnuma(): p->numa_preferred_nid
499 * - sched_move_task(): p->sched_task_group
500 * - uclamp_update_active() p->uclamp*
502 * p->state <- TASK_*:
506 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
509 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
512 * rq->lock. Non-zero indicates the task is runnable, the special
514 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
516 * p->on_cpu <- { 0, 1 }:
519 * set before p is scheduled-in and cleared after p is scheduled-out, both
520 * under rq->lock. Non-zero indicates the task is running on its CPU.
523 * CPU to have ->on_cpu = 1 at the same time. ]
527 * - Don't call set_task_cpu() on a blocked task:
532 * - for try_to_wake_up(), called under p->pi_lock:
534 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
536 * - for migration called under rq->lock:
542 * - for migration called under double_rq_lock():
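
/*
 * Illustrative sketch (not kernel code) of the ordering rule above: when two
 * locks of the same class must be held, always take them in one global order
 * -- here by address, mirroring "rq1->lock before rq2->lock where rq1 < rq2"
 * -- so no ABBA deadlock is possible.  Names are hypothetical.
 */
#include <pthread.h>

static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a > b) {			/* impose the global order */
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	if (b != a)
		pthread_mutex_lock(b);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (b != a)
		pthread_mutex_unlock(b);
	pthread_mutex_unlock(a);
}
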
558 raw_spin_lock_nested(&rq->__lock, subclass);
584 ret = raw_spin_trylock(&rq->__lock);
607 * double_rq_lock - safely lock two runqueues
625 * __task_rq_lock - lock the rq @p resides on.
628 __acquires(rq->lock)
632 lockdep_assert_held(&p->pi_lock);
649 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
652 __acquires(p->pi_lock)
653 __acquires(rq->lock)
658 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
664 * ACQUIRE (rq->lock)
665 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
666 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
667 * [S] ->cpu = new_cpu [L] task_rq()
668 * [L] ->on_rq
669 * RELEASE (rq->lock)
672 * the old rq->lock will fully serialize against the stores.
683 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
691 * RQ-clock updating methods:
703 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
710 * When this happens, we stop ->clock_task and only update the
712 * update will consume the rest. This ensures ->clock_task is
 715  * It does however cause some slight mis-attribution of {soft,}irq
717 * the current rq->clock timestamp, except that would require using
723 rq->prev_irq_time += irq_delta;
724 delta -= irq_delta;
725 delayacct_irq(rq->curr, irq_delta);
732 steal -= rq->prev_steal_time_rq;
737 rq->prev_steal_time_rq = prev_steal;
738 delta -= steal;
742 rq->clock_task += delta;
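
/*
 * Sketch of the bookkeeping done by update_rq_clock_task() above (not kernel
 * code): IRQ time and stolen time are subtracted from the raw clock delta
 * before it is added to clock_task, so tasks are not charged for either.
 * The numbers are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long delta      = 1000000; /* ns of wall clock since last update */
	unsigned long long irq_delta  =  150000; /* ns spent in hard/soft IRQ context  */
	unsigned long long steal      =   50000; /* ns stolen by the hypervisor        */
	unsigned long long clock_task = 0;

	delta -= irq_delta;
	delta -= steal;
	clock_task += delta;

	printf("clock_task advanced by %llu ns\n", clock_task); /* 800000 */
	return 0;
}
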
757 if (rq->clock_update_flags & RQCF_ACT_SKIP)
762 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
763 rq->clock_update_flags |= RQCF_UPDATED;
766 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
769 rq->clock += delta;
775 * Use HR-timers to deliver accurate preemption points.
780 if (hrtimer_active(&rq->hrtick_timer))
781 hrtimer_cancel(&rq->hrtick_timer);
785 * High-resolution timer tick.
797 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
807 struct hrtimer *timer = &rq->hrtick_timer;
808 ktime_t time = rq->hrtick_time;
829 * called with rq->lock held and irqs disabled
833 struct hrtimer *timer = &rq->hrtick_timer;
841 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
846 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
853 * called with rq->lock held and irqs disabled
862 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
871 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
873 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
874 rq->hrtick_timer.function = hrtick;
909 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
921 typeof(ti->flags) val = READ_ONCE(ti->flags);
928 if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
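
/*
 * The two helpers above implement "set TIF_NEED_RESCHED and only send an IPI
 * if the remote CPU is not already polling its flags".  A plain C11 sketch of
 * the same pattern (flag values and names are made up, not the kernel's):
 */
#include <stdatomic.h>
#include <stdbool.h>

#define FLAG_NEED_RESCHED	(1u << 0)
#define FLAG_POLLING		(1u << 1)

/* fetch_or() variant: set the flag, report whether an IPI is still needed. */
static bool set_need_resched(atomic_uint *flags)
{
	unsigned int old = atomic_fetch_or(flags, FLAG_NEED_RESCHED);

	return !(old & FLAG_POLLING);		/* true: caller must notify */
}

/* cmpxchg-loop variant: only set the flag if the target is polling. */
static bool set_need_resched_if_polling(atomic_uint *flags)
{
	unsigned int val = atomic_load(flags);

	do {
		if (!(val & FLAG_POLLING))
			return false;		/* not polling: caller must IPI */
		if (val & FLAG_NEED_RESCHED)
			return true;		/* already set, nothing to do   */
	} while (!atomic_compare_exchange_weak(flags, &val,
					       val | FLAG_NEED_RESCHED));

	return true;
}
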
951 struct wake_q_node *node = &task->wake_q;
954 * Atomically grab the task, if ->wake_q is !nil already it means
962 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
968 *head->lastp = node;
969 head->lastp = &node->next;
974 * wake_q_add() - queue a wakeup for 'later' waking.
982 * This function must be used as-if it were wake_up_process(); IOW the task
992 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1000 * This function must be used as-if it were wake_up_process(); IOW the task
1003 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1016 struct wake_q_node *node = head->first;
1022 node = node->next;
1024 WRITE_ONCE(task->wake_q.next, NULL);
1025 /* Task can safely be re-inserted now. */
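
/*
 * Illustrative user-space analogue of the wake_q machinery above (not kernel
 * code).  Each "task" owns one node; adding it cmpxchg-es node->next from
 * NULL to a tail sentinel, so a task can be queued at most once, and the node
 * is then appended to a singly linked list that one caller later walks to
 * issue the deferred wakeups.  Names and types are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct wq_node {
	_Atomic(struct wq_node *) next;
};

#define WQ_TAIL ((struct wq_node *)0x1)		/* sentinel, never dereferenced */

struct wq_head {
	_Atomic(struct wq_node *) first;
	_Atomic(struct wq_node *) *lastp;
};

static void wq_init(struct wq_head *h)
{
	atomic_store(&h->first, WQ_TAIL);
	h->lastp = &h->first;
}

/* Returns false if the node is already on some wake list, like wake_q_add(). */
static bool wq_add(struct wq_head *h, struct wq_node *n)
{
	struct wq_node *expected = NULL;

	if (!atomic_compare_exchange_strong(&n->next, &expected, WQ_TAIL))
		return false;

	/* Only one context appends to a given head, so plain ordering is fine. */
	atomic_store(h->lastp, n);
	h->lastp = &n->next;
	return true;
}

/* Walk the list, reset each node so it may be re-queued, then "wake" it. */
static void wq_flush(struct wq_head *h, void (*wake)(struct wq_node *))
{
	struct wq_node *node = atomic_load(&h->first);

	while (node != WQ_TAIL) {
		struct wq_node *next = atomic_load(&node->next);

		atomic_store(&node->next, NULL);
		wake(node);
		node = next;
	}
	wq_init(h);
}
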
1037 * resched_curr - mark rq's current task 'to be rescheduled now'.
1040 * might also involve a cross-CPU call to trigger the scheduler on
1045 struct task_struct *curr = rq->curr;
1082 * from an idle CPU. This is good for power-savings.
1090 int i, cpu = smp_processor_id(), default_cpu = -1;
1114 if (default_cpu == -1)
1137 if (set_nr_and_not_polling(rq->idle))
1146 * We just need the target to call irq_exit() and re-evaluate
1186 rq->idle_balance = idle_cpu(cpu);
1187 if (rq->idle_balance) {
1188 rq->nohz_idle_balance = flags;
1198 if (rq->nr_running != 1)
1201 if (p->sched_class != &fair_sched_class)
1215 if (rq->dl.dl_nr_running)
1222 if (rq->rt.rr_nr_running) {
1223 if (rq->rt.rr_nr_running == 1)
1233 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1242 if (rq->nr_running > 1)
1250 * E.g. going from 2->1 without going through pick_next_task().
1252 if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
1253 if (cfs_task_bw_constrained(rq->curr))
1282 list_for_each_entry_rcu(child, &parent->children, siblings) {
1294 parent = parent->parent;
1309 int prio = p->static_prio - MAX_RT_PRIO;
1324 if (update_load && p->sched_class == &fair_sched_class)
1327 p->se.load = lw;
1334 * The (slow-path) user-space triggers utilization clamp value updates which
1335 * can require updates on (fast-path) scheduler's data structures used to
1337 * While the per-CPU rq lock protects fast-path update operations, user-space
1355 * used. In battery powered devices, particularly, running at the maximum
 1359  * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1397 return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
1410 uc_se->value = value;
1411 uc_se->bucket_id = uclamp_bucket_id(value);
1412 uc_se->user_defined = user_defined;
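
/*
 * Worked example of the bucketing used above (uclamp_bucket_id()), assuming
 * the default CONFIG_UCLAMP_BUCKETS_COUNT of 5 and SCHED_CAPACITY_SCALE of
 * 1024; standalone sketch, not kernel code.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024u
#define UCLAMP_BUCKETS		5u
/* DIV_ROUND_CLOSEST(1024, 5) == 205 */
#define UCLAMP_BUCKET_DELTA	((SCHED_CAPACITY_SCALE + UCLAMP_BUCKETS / 2) / UCLAMP_BUCKETS)

static unsigned int bucket_id(unsigned int clamp_value)
{
	unsigned int id = clamp_value / UCLAMP_BUCKET_DELTA;

	return id < UCLAMP_BUCKETS - 1 ? id : UCLAMP_BUCKETS - 1;
}

int main(void)
{
	/* 0 -> bucket 0, 512 -> bucket 2, 1024 -> bucket 4 (the last one) */
	printf("%u %u %u\n", bucket_id(0), bucket_id(512), bucket_id(1024));
	return 0;
}
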
1421 * idle (which drops the max-clamp) by retaining the last known
1422 * max-clamp.
1425 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1435 /* Reset max-clamp retention only on idle exit */
1436 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1446 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1447 int bucket_id = UCLAMP_BUCKETS - 1;
1453 for ( ; bucket_id >= 0; bucket_id--) {
1459 /* No tasks -- default clamp values */
1468 lockdep_assert_held(&p->pi_lock);
1470 uc_se = &p->uclamp_req[UCLAMP_MIN];
1473 if (uc_se->user_defined)
1488 /* Protect updates to p->uclamp_* */
1498 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1511 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1512 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1524 * - the task specific clamp value, when explicitly requested from userspace
1525 * - the task group effective clamp value, for tasks not either in the root
1527 * - the system default clamp value, defined by the sysadmin
1546 /* Task currently refcounted: use back-annotated (effective) value */
1547 if (p->uclamp[clamp_id].active)
1548 return (unsigned long)p->uclamp[clamp_id].value;
1560 * Tasks can have a task-specific value requested from user-space, track
1568 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1569 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1575 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1577 bucket = &uc_rq->bucket[uc_se->bucket_id];
1578 bucket->tasks++;
1579 uc_se->active = true;
1581 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1587 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1588 bucket->value = uc_se->value;
1590 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1591 uclamp_rq_set(rq, clamp_id, uc_se->value);
1606 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1607 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1618 * In this case the uc_se->active flag should be false since no uclamp
1629 * // Must not decrement bucket->tasks here
1633 * bucket[uc_se->bucket_id].
1637 if (unlikely(!uc_se->active))
1640 bucket = &uc_rq->bucket[uc_se->bucket_id];
1642 SCHED_WARN_ON(!bucket->tasks);
1643 if (likely(bucket->tasks))
1644 bucket->tasks--;
1646 uc_se->active = false;
1654 if (likely(bucket->tasks))
1662 SCHED_WARN_ON(bucket->value > rq_clamp);
1663 if (bucket->value >= rq_clamp) {
1664 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1682 if (unlikely(!p->sched_class->uclamp_enabled))
1689 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1690 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1706 if (unlikely(!p->sched_class->uclamp_enabled))
1716 if (!p->uclamp[clamp_id].active)
1726 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1727 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1782 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1784 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1845 result = -EINVAL;
1889 int util_min = p->uclamp_req[UCLAMP_MIN].value;
1890 int util_max = p->uclamp_req[UCLAMP_MAX].value;
1892 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1893 util_min = attr->sched_util_min;
1896 return -EINVAL;
1899 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1900 util_max = attr->sched_util_max;
1903 return -EINVAL;
1906 if (util_min != -1 && util_max != -1 && util_min > util_max)
1907 return -EINVAL;
1925 /* Reset on sched class change for a non user-defined clamp value. */
1926 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
1927 !uc_se->user_defined)
1930 /* Reset on sched_util_{min,max} == -1. */
1932 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1933 attr->sched_util_min == -1) {
1938 attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1939 attr->sched_util_max == -1) {
1952 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1971 if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1974 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1975 attr->sched_util_min != -1) {
1976 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1977 attr->sched_util_min, true);
1980 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1981 attr->sched_util_max != -1) {
1982 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1983 attr->sched_util_max, true);
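
/*
 * User-space sketch of feeding the util-clamp fields consumed above via
 * sched_setattr(2).  glibc has no wrapper, so the struct layout and flag
 * values are spelled out locally; they are assumed to match the current
 * include/uapi/linux/sched.h, treat them as illustrative rather than
 * authoritative.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr_local {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime, sched_deadline, sched_period;
	uint32_t sched_util_min, sched_util_max;
};

#define SCHED_FLAG_KEEP_ALL		0x18	/* KEEP_POLICY | KEEP_PARAMS */
#define SCHED_FLAG_UTIL_CLAMP_MIN	0x20
#define SCHED_FLAG_UTIL_CLAMP_MAX	0x40

int main(void)
{
	struct sched_attr_local attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	/* Keep policy/priority, only request a clamp of [256, 768]. */
	attr.sched_flags = SCHED_FLAG_KEEP_ALL |
			   SCHED_FLAG_UTIL_CLAMP_MIN |
			   SCHED_FLAG_UTIL_CLAMP_MAX;
	attr.sched_util_min = 256;
	attr.sched_util_max = 768;

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}
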
1992 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1996 p->uclamp[clamp_id].active = false;
1998 if (likely(!p->sched_reset_on_fork))
2002 uclamp_se_set(&p->uclamp_req[clamp_id],
2015 struct uclamp_rq *uc_rq = rq->uclamp;
2023 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2057 return -EOPNOTSUPP;
2080 raw_spin_lock_irq(&p->pi_lock);
2081 state = READ_ONCE(p->__state);
2083 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2085 raw_spin_unlock_irq(&p->pi_lock);
2101 p->sched_class->enqueue_task(rq, p, flags);
2121 p->sched_class->dequeue_task(rq, p, flags);
2133 p->on_rq = TASK_ON_RQ_QUEUED;
2138 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
2148 prio = MAX_DL_PRIO - 1;
2150 prio = MAX_RT_PRIO - 1 - rt_prio;
2159 * without taking RT-inheritance into account. Might be
2166 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
2174 * RT-boosted. If not then it returns p->normal_prio.
2178 p->normal_prio = normal_prio(p);
2184 if (!rt_prio(p->prio))
2185 return p->normal_prio;
2186 return p->prio;
2190 * task_curr - is this task currently executing on a CPU?
2201 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2211 if (prev_class != p->sched_class) {
2212 if (prev_class->switched_from)
2213 prev_class->switched_from(rq, p);
2215 p->sched_class->switched_to(rq, p);
2216 } else if (oldprio != p->prio || dl_task(p))
2217 p->sched_class->prio_changed(rq, p, oldprio);
2222 if (p->sched_class == rq->curr->sched_class)
2223 rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2224 else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2231 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2238 if (READ_ONCE(p->__state) & state)
2242 if (READ_ONCE(p->saved_state) & state)
2243 return -1;
2258 raw_spin_lock_irq(&p->pi_lock);
2260 raw_spin_unlock_irq(&p->pi_lock);
2269 * wait_task_inactive - wait for a thread to unschedule.
2294 * any task-queue locks at all. We'll only try to get
2302 * still, just relax and busy-wait without holding
2329 * When matching on p->saved_state, consider this task
2334 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2362 * yield - it could be a while.
2394 .new_mask = cpumask_of(rq->cpu),
2398 if (likely(!p->migration_disabled))
2401 if (p->cpus_ptr != &p->cpus_mask)
2414 if (p->migration_disabled) {
2415 p->migration_disabled++;
2420 this_rq()->nr_pinned++;
2421 p->migration_disabled = 1;
2430 .new_mask = &p->cpus_mask,
2434 if (p->migration_disabled > 1) {
2435 p->migration_disabled--;
2439 if (WARN_ON_ONCE(!p->migration_disabled))
2447 if (p->cpus_ptr != &p->cpus_mask)
2455 p->migration_disabled = 0;
2456 this_rq()->nr_pinned--;
2463 return rq->nr_pinned;
2467 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2473 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2481 if (!(p->flags & PF_KTHREAD))
2511 * move_queued_task - move a queued task to new rq.
2574 * migration_cpu_stop - this will be executed by a highprio stopper thread
2581 struct set_affinity_pending *pending = arg->pending;
2582 struct task_struct *p = arg->task;
2599 raw_spin_lock(&p->pi_lock);
2603 * If we were passed a pending, then ->stop_pending was set, thus
2604 * p->migration_pending must have remained stable.
2606 WARN_ON_ONCE(pending && pending != p->migration_pending);
2610 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2611 * we're holding p->pi_lock.
2618 p->migration_pending = NULL;
2621 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2627 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2629 p->wake_cpu = arg->dest_cpu;
2651 * ->pi_lock, so the allowed mask is stable - if it got
2654 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2655 p->migration_pending = NULL;
2661 * When migrate_enable() hits a rq mis-match we can't reliably
2665 WARN_ON_ONCE(!pending->stop_pending);
2669 &pending->arg, &pending->stop_work);
2675 pending->stop_pending = false;
2679 complete_all(&pending->done);
2689 raw_spin_lock_irq(&p->pi_lock);
2696 p->migration_flags |= MDF_PUSH;
2700 p->migration_flags &= ~MDF_PUSH;
2702 if (p->sched_class->find_lock_rq)
2703 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2711 set_task_cpu(p, lowest_rq->cpu);
2719 rq->push_busy = false;
2721 raw_spin_unlock_irq(&p->pi_lock);
2733 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2734 p->cpus_ptr = ctx->new_mask;
2738 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2739 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2744 if (ctx->flags & SCA_USER)
2745 swap(p->user_cpus_ptr, ctx->user_mask);
2756 * supposed to change these variables while holding both rq->lock and
2757 * p->pi_lock.
2760 * accesses these variables under p->pi_lock and only does so after
2761 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2766 if (ctx->flags & SCA_MIGRATE_DISABLE)
2767 SCHED_WARN_ON(!p->on_cpu);
2769 lockdep_assert_held(&p->pi_lock);
2777 * holding rq->lock.
2785 p->sched_class->set_cpus_allowed(p, ctx);
2812 * Because this is called with p->pi_lock held, it is not possible
2836 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2839 dst->user_cpus_ptr = NULL;
2846 if (data_race(!src->user_cpus_ptr))
2851 return -ENOMEM;
2859 raw_spin_lock_irqsave(&src->pi_lock, flags);
2860 if (src->user_cpus_ptr) {
2861 swap(dst->user_cpus_ptr, user_mask);
2862 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2864 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2876 swap(p->user_cpus_ptr, user_mask);
2894 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2897 * Initial conditions: P0->cpus_mask = [0, 1]
2906 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2919 * `--> <woken on migration completion>
2921 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2923 * task p are serialized by p->pi_lock, which we can leverage: the one that
2924 * should come into effect at the end of the Migrate-Disable region is the last
2925 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2930 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2934 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2940 * Migrate-Disable. Consider:
2942 * Initial conditions: P0->cpus_mask = [0, 1]
2960 * p->migration_pending done with p->pi_lock held.
2964 __releases(rq->lock)
2965 __releases(p->pi_lock)
2971 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2975 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2976 rq->push_busy = true;
2984 pending = p->migration_pending;
2985 if (pending && !pending->stop_pending) {
2986 p->migration_pending = NULL;
2993 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2994 p, &rq->push_work);
2999 complete_all(&pending->done);
3005 /* serialized by p->pi_lock */
3006 if (!p->migration_pending) {
3016 p->migration_pending = &my_pending;
3018 pending = p->migration_pending;
3019 refcount_inc(&pending->refs);
3026 * Serialized by p->pi_lock, so this is safe.
3028 pending->arg.dest_cpu = dest_cpu;
3031 pending = p->migration_pending;
3033 * - !MIGRATE_ENABLE:
3036 * - MIGRATE_ENABLE:
3046 return -EINVAL;
3049 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
3053 * and have the stopper function handle it all race-free.
3055 stop_pending = pending->stop_pending;
3057 pending->stop_pending = true;
3060 p->migration_flags &= ~MDF_PUSH;
3066 &pending->arg, &pending->stop_work);
3078 if (!pending->stop_pending) {
3079 p->migration_pending = NULL;
3086 complete_all(&pending->done);
3089 wait_for_completion(&pending->done);
3091 if (refcount_dec_and_test(&pending->refs))
3092 wake_up_var(&pending->refs); /* No UaF, just an address */
3107 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3113 __releases(rq->lock)
3114 __releases(p->pi_lock)
3118 bool kthread = p->flags & PF_KTHREAD;
3127 * however, during cpu-hot-unplug, even these might get pushed
3133 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3138 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3139 ret = -EINVAL;
3144 * Must re-check here, to close a race against __kthread_bind(),
3147 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3148 ret = -EINVAL;
3152 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3153 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3154 if (ctx->flags & SCA_USER)
3155 swap(p->user_cpus_ptr, ctx->user_mask);
3161 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3162 ret = -EBUSY;
3172 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3174 ret = -EINVAL;
3180 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3208 if (p->user_cpus_ptr &&
3209 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3210 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3211 ctx->new_mask = rq->scratch_mask;
3234 * -EINVAL.
3256 err = -EPERM;
3261 err = -EINVAL;
3274 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3307 task_pid_nr(p), p->comm,
3346 unsigned int state = READ_ONCE(p->__state);
3352 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3355 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3357 * time relying on p->on_rq.
3360 p->sched_class == &fair_sched_class &&
3361 (p->on_rq && !task_on_rq_migrating(p)));
3365 * The caller should hold either p->pi_lock or rq->lock, when changing
3366 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3374 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3388 if (p->sched_class->migrate_task_rq)
3389 p->sched_class->migrate_task_rq(p, new_cpu);
3390 p->se.nr_migrations++;
3426 p->wake_cpu = cpu;
3440 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3441 return -EAGAIN;
3443 src_rq = cpu_rq(arg->src_cpu);
3444 dst_rq = cpu_rq(arg->dst_cpu);
3446 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3449 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3450 return -EAGAIN;
3452 if (task_cpu(arg->src_task) != arg->src_cpu)
3453 return -EAGAIN;
3455 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3456 return -EAGAIN;
3458 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3459 return -EAGAIN;
3461 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3462 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3474 int ret = -EINVAL;
3488 * will be re-checked with proper locks held further down the line.
3493 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3496 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3508 * kick_process - kick a running thread to enter/exit the kernel
3509 * @p: the to-be-kicked thread
3512 * kernel-mode, without any delay. (to get signals handled.)
3533 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3537 * - cpu_active must be a subset of cpu_online
3539 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3544 * - on CPU-down we clear cpu_active() to mask the sched domains and
3563 * will return -1. There is no CPU on the node, and we should
3566 if (nid != -1) {
3578 for_each_cpu(dest_cpu, p->cpus_ptr) {
3596 * hold p->pi_lock and again violate locking order.
3616 if (p->mm && printk_ratelimit()) {
3618 task_pid_nr(p), p->comm, cpu);
3626 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3631 lockdep_assert_held(&p->pi_lock);
3633 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3634 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3636 cpu = cpumask_any(p->cpus_ptr);
3640 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3645 * [ this allows ->select_task() to simply return task_cpu(p) and
3657 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3658 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3666 * much confusion -- but then, stop work should not
3671 stop->sched_class = &stop_sched_class;
3674 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3680 * The stop task itself will never be part of the PI-chain, it
3681 * never blocks, therefore that ->pi_lock recursion is safe.
3682 * Tell lockdep about this by placing the stop->pi_lock in its
3685 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3688 cpu_rq(cpu)->stop = stop;
3695 old_stop->sched_class = &rt_sched_class;
3704 return set_cpus_allowed_ptr(p, ctx->new_mask);
3732 if (cpu == rq->cpu) {
3733 __schedstat_inc(rq->ttwu_local);
3734 __schedstat_inc(p->stats.nr_wakeups_local);
3738 __schedstat_inc(p->stats.nr_wakeups_remote);
3741 for_each_domain(rq->cpu, sd) {
3743 __schedstat_inc(sd->ttwu_wake_remote);
3750 __schedstat_inc(p->stats.nr_wakeups_migrate);
3753 __schedstat_inc(rq->ttwu_count);
3754 __schedstat_inc(p->stats.nr_wakeups);
3757 __schedstat_inc(p->stats.nr_wakeups_sync);
3765 WRITE_ONCE(p->__state, TASK_RUNNING);
3777 if (p->sched_contributes_to_load)
3778 rq->nr_uninterruptible--;
3785 if (p->in_iowait) {
3787 atomic_dec(&task_rq(p)->nr_iowait);
3796 if (p->sched_class->task_woken) {
3799 * drop the rq->lock, hereafter rq is only used for statistics.
3802 p->sched_class->task_woken(rq, p);
3806 if (rq->idle_stamp) {
3807 u64 delta = rq_clock(rq) - rq->idle_stamp;
3808 u64 max = 2*rq->max_idle_balance_cost;
3810 update_avg(&rq->avg_idle, delta);
3812 if (rq->avg_idle > max)
3813 rq->avg_idle = max;
3815 rq->wake_stamp = jiffies;
3816 rq->wake_avg_idle = rq->avg_idle / 2;
3818 rq->idle_stamp = 0;
3837 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3840 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3841 * then schedule() must still happen and p->state can be changed to
3887 if (WARN_ON_ONCE(p->on_cpu))
3888 smp_cond_load_acquire(&p->on_cpu, !VAL);
3893 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3898 * idle_cpu() does not observe a false-negative -- if it does,
3906 WRITE_ONCE(rq->ttwu_pending, 0);
3918 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3936 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3938 WRITE_ONCE(rq->ttwu_pending, 1);
3939 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3947 if (is_idle_task(rcu_dereference(rq->curr))) {
3949 if (is_idle_task(rq->curr))
3972 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3988 * the task activation to the idle (or soon-to-be-idle) CPU as
3992 * Note that we can only get here with (wakee) p->on_rq=0,
3993 * p->on_cpu can be whatever, we've done the dequeue, so
3994 * the wakee has been accounted out of ->nr_running.
3996 if (!cpu_rq(cpu)->nr_running)
4077 p->saved_state = TASK_RUNNING;
4083 * Notes on Program-Order guarantees on SMP systems.
4087 * The basic program-order guarantee on SMP systems is that when a task [t]
4088 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4093 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4094 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4095 * rq(c1)->lock (if not at the same time, then in that order).
4096 * C) LOCK of the rq(c1)->lock scheduling in task
4105 * LOCK rq(0)->lock
4106 * sched-out X
4107 * sched-in Y
4108 * UNLOCK rq(0)->lock
4110 * LOCK rq(0)->lock // orders against CPU0
4112 * UNLOCK rq(0)->lock
4114 * LOCK rq(1)->lock
4116 * UNLOCK rq(1)->lock
4118 * LOCK rq(1)->lock // orders against CPU2
4119 * sched-out Z
4120 * sched-in X
4121 * UNLOCK rq(1)->lock
4124 * BLOCKING -- aka. SLEEP + WAKEUP
4130 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4131 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4137 * LOCK rq(0)->lock LOCK X->pi_lock
4139 * sched-out X
4140 * smp_store_release(X->on_cpu, 0);
4142 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4143 * X->state = WAKING
4146 * LOCK rq(2)->lock
4148 * X->state = RUNNING
4149 * UNLOCK rq(2)->lock
4151 * LOCK rq(2)->lock // orders against CPU1
4152 * sched-out Z
4153 * sched-in X
4154 * UNLOCK rq(2)->lock
4156 * UNLOCK X->pi_lock
4157 * UNLOCK rq(0)->lock
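
/*
 * C11 sketch of the RELEASE/ACQUIRE pair described above: the equivalent of
 * smp_store_release(&X->on_cpu, 0) in finish_task() and
 * smp_cond_load_acquire(&X->on_cpu, !VAL) in try_to_wake_up().  Everything
 * written before the release store is visible after the acquire load.
 * Not kernel code; names are hypothetical.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int task_state;			/* "everything prior to the RELEASE" */
static atomic_int on_cpu = 1;

static void *prev_cpu(void *arg)	/* plays the role of finish_task() */
{
	task_state = 42;		/* sched-out state of the task */
	atomic_store_explicit(&on_cpu, 0, memory_order_release);
	return NULL;
}

static void *waker(void *arg)		/* plays the role of try_to_wake_up() */
{
	while (atomic_load_explicit(&on_cpu, memory_order_acquire))
		;			/* spin, like smp_cond_load_acquire() */
	printf("saw task_state=%d\n", task_state);	/* guaranteed 42 */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&b, NULL, waker, NULL);
	pthread_create(&a, NULL, prev_cpu, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
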
4166 * try_to_wake_up - wake up a thread
4173 * If (@state & @p->state) @p->state = TASK_RUNNING.
4179 * It issues a full memory barrier before accessing @p->state, see the comment
4182 * Uses p->pi_lock to serialize against concurrent wake-ups.
4184 * Relies on p->pi_lock stabilizing:
4185 * - p->sched_class
4186 * - p->cpus_ptr
4187 * - p->sched_task_group
4190 * Tries really hard to only take one task_rq(p)->lock for performance.
4191 * Takes rq->lock in:
4192 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4193 * - ttwu_queue() -- new rq, for enqueue of the task;
4194 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4199 * Return: %true if @p->state changes (an actual wakeup was done),
4209 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4211 * case the whole 'p->on_rq && ttwu_runnable()' case below
4215 * - we rely on Program-Order guarantees for all the ordering,
4216 * - we're serialized against set_special_state() by virtue of
4217 * it disabling IRQs (this allows not taking ->pi_lock).
4230 * reordered with p->state check below. This pairs with smp_store_mb()
4233 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4241 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4242 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4246 * STORE p->on_rq = 1 LOAD p->state
4247 * UNLOCK rq->lock
4250 * LOCK rq->lock smp_rmb();
4252 * UNLOCK rq->lock
4255 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4257 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4263 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4268 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4269 * possible to, falsely, observe p->on_cpu == 0.
4271 * One must be running (->on_cpu == 1) in order to remove oneself
4275 * STORE p->on_cpu = 1 LOAD p->on_rq
4276 * UNLOCK rq->lock
4279 * LOCK rq->lock smp_rmb();
4281 * STORE p->on_rq = 0 LOAD p->on_cpu
4283 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4286 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
 4288  * care about its own p->state. See the comment in __schedule().
4293 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4294 * == 0), which means we need to do an enqueue, change p->state to
4295 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4298 WRITE_ONCE(p->__state, TASK_WAKING);
4303 * which potentially sends an IPI instead of spinning on p->on_cpu to
4307 * Ensure we load task_cpu(p) after p->on_cpu:
4310 * STORE p->cpu = @cpu
4312 * LOCK rq->lock
4313 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4314 * STORE p->on_cpu = 1 LOAD p->cpu
4319 if (smp_load_acquire(&p->on_cpu) &&
4332 smp_cond_load_acquire(&p->on_cpu, !VAL);
4334 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4336 if (p->in_iowait) {
4338 atomic_dec(&task_rq(p)->nr_iowait);
4360 unsigned int state = READ_ONCE(p->__state);
4363 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4371 * Ensure we load p->on_rq after p->__state, otherwise it would be
4372 * possible to, falsely, observe p->on_rq == 0.
4377 if (p->on_rq)
4386 smp_cond_load_acquire(&p->on_cpu, !VAL);
4393 * task_call_func - Invoke a function on task in fixed state
4399 * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4412 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4419 * - blocked and we're holding off wakeups (pi->lock)
4420 * - woken, and we're holding off enqueue (rq->lock)
4421 * - queued, and we're holding off schedule (rq->lock)
4422 * - running, and we're holding off de-schedule (rq->lock)
4424 * The called function (@func) can use: task_curr(), p->on_rq and
4425 * p->__state to differentiate between these states.
4432 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4437 * cpu_curr_snapshot - Return a snapshot of the currently running task
4469 * wake_up_process - Wake up a specific process
4499 p->on_rq = 0;
4501 p->se.on_rq = 0;
4502 p->se.exec_start = 0;
4503 p->se.sum_exec_runtime = 0;
4504 p->se.prev_sum_exec_runtime = 0;
4505 p->se.nr_migrations = 0;
4506 p->se.vruntime = 0;
4507 p->se.vlag = 0;
4508 p->se.slice = sysctl_sched_base_slice;
4509 INIT_LIST_HEAD(&p->se.group_node);
4512 p->se.cfs_rq = NULL;
4517 memset(&p->stats, 0, sizeof(p->stats));
4520 init_dl_entity(&p->dl);
4522 INIT_LIST_HEAD(&p->rt.run_list);
4523 p->rt.timeout = 0;
4524 p->rt.time_slice = sched_rr_timeslice;
4525 p->rt.on_rq = 0;
4526 p->rt.on_list = 0;
4529 INIT_HLIST_HEAD(&p->preempt_notifiers);
4533 p->capture_control = NULL;
4537 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4538 p->migration_pending = NULL;
4572 pgdat->nbp_threshold = 0;
4573 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4574 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4586 return -EPERM;
4655 return -EPERM;
4727 * fork()/clone()-time setup:
4737 p->__state = TASK_NEW;
4742 p->prio = current->normal_prio;
4749 if (unlikely(p->sched_reset_on_fork)) {
4751 p->policy = SCHED_NORMAL;
4752 p->static_prio = NICE_TO_PRIO(0);
4753 p->rt_priority = 0;
4754 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4755 p->static_prio = NICE_TO_PRIO(0);
4757 p->prio = p->normal_prio = p->static_prio;
4764 p->sched_reset_on_fork = 0;
4767 if (dl_prio(p->prio))
4768 return -EAGAIN;
4769 else if (rt_prio(p->prio))
4770 p->sched_class = &rt_sched_class;
4772 p->sched_class = &fair_sched_class;
4774 init_entity_runnable_average(&p->se);
4779 memset(&p->sched_info, 0, sizeof(p->sched_info));
4782 p->on_cpu = 0;
4786 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4787 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4797 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4800 raw_spin_lock_irqsave(&p->pi_lock, flags);
4804 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4807 p->sched_task_group = tg;
4816 if (p->sched_class->task_fork)
4817 p->sched_class->task_fork(p);
4818 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4843 * wake_up_new_task - wake up a newly created task for the first time.
4854 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4855 WRITE_ONCE(p->__state, TASK_RUNNING);
4859 * - cpus_ptr can change in the fork path
4860 * - any previously selected CPU might disappear through hotplug
4863 * as we're not fully set-up yet.
4865 p->recent_used_cpu = task_cpu(p);
4877 if (p->sched_class->task_woken) {
4879 * Nothing relies on rq->lock after this, so it's fine to
4883 p->sched_class->task_woken(rq, p);
4907 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4915 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4920 * preempt_notifier_unregister - no longer interested in preemption notifications
4927 hlist_del(&notifier->link);
4935 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4936 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4951 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4952 notifier->ops->sched_out(notifier, next);
4984 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4987 WRITE_ONCE(next->on_cpu, 1);
4996 * p->on_cpu is cleared, the task can be moved to a different CPU. We
5000 * In particular, the load of prev->state in finish_task_switch() must
5005 smp_store_release(&prev->on_cpu, 0);
5019 func = (void (*)(struct rq *))head->func;
5020 next = head->next;
5021 head->next = NULL;
5035 * that queued it (only later, when it's safe to drop rq->lock again),
5039 * a single test, namely: rq->balance_callback == NULL.
5049 struct balance_callback *head = rq->balance_callback;
5058 * in the same rq->lock section.
5066 rq->balance_callback = NULL;
5115 * of the scheduler it's an obvious special-case), so we
5119 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5122 rq_lockp(rq)->owner = next;
5130 * fix up the runqueue lock - which gets 'carried over' from
5133 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5153 if (unlikely(current->kmap_ctrl.idx))
5161 if (unlikely(current->kmap_ctrl.idx))
5167 * prepare_task_switch - prepare to switch tasks
5194 * finish_task_switch - clean up after a task-switch
5200 * and do any other architecture-specific cleanup actions.
5213 __releases(rq->lock)
5216 struct mm_struct *mm = rq->prev_mm;
5226 * raw_spin_lock_irq(&rq->lock) // 2
5232 current->comm, current->pid, preempt_count()))
5235 rq->prev_mm = NULL;
5239 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5243 * We must observe prev->state before clearing prev->on_cpu (in
 5245  * running on another CPU and we could race with its RUNNING -> DEAD
5248 prev_state = READ_ONCE(prev->__state);
 5270  * schedule between user->kernel->user threads without passing through
5272 * rq->curr, before returning to userspace, so provide them here:
5274 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5276 * - a sync_core for SYNC_CORE.
5284 if (prev->sched_class->task_dead)
5285 prev->sched_class->task_dead(prev);
5297 * schedule_tail - first thing a freshly forked thread must call.
5301 __releases(rq->lock)
5307 * finish_task_switch() will drop rq->lock() and lower preempt_count
5315 if (current->set_child_tid)
5316 put_user(task_pid_vnr(current), current->set_child_tid);
5322 * context_switch - switch to the new MM and the new thread's register state.
5338 * kernel -> kernel lazy + transfer active
5339 * user -> kernel lazy + mmgrab_lazy_tlb() active
5341 * kernel -> user switch + mmdrop_lazy_tlb() active
5342 * user -> user switch
5347 if (!next->mm) { // to kernel
5348 enter_lazy_tlb(prev->active_mm, next);
5350 next->active_mm = prev->active_mm;
5351 if (prev->mm) // from user
5352 mmgrab_lazy_tlb(prev->active_mm);
5354 prev->active_mm = NULL;
5356 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5359 * rq->curr / membarrier_switch_mm() and returning to userspace.
5362 * case 'prev->active_mm == next->mm' through
5365 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5366 lru_gen_use_mm(next->mm);
5368 if (!prev->mm) { // from kernel
5370 rq->prev_mm = prev->active_mm;
5371 prev->active_mm = NULL;
5398 sum += cpu_rq(i)->nr_running;
5407 * preemption, thus the result might have a time-of-check-to-time-of-use
5410 * - from a non-preemptible section (of course)
5412 * - from a thread that is bound to a single CPU
5414 * - in a loop with very short iterations (e.g. a polling loop)
5418 return raw_rq()->nr_running == 1;
5424 return cpu_rq(cpu)->nr_switches;
5433 sum += cpu_rq(i)->nr_switches;
5441 * for a CPU that has IO-wait which might not even end up running the task when
5447 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5451 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5453 * The idea behind IO-wait account is to account the idle time that we could
5455 * storage performance, we'd have a proportional reduction in IO-wait time.
5458 * idle time as IO-wait, because if the storage were faster, it could've been
5465 * CPU will have IO-wait accounted, while the other has regular idle. Even
5469 * This means, that when looking globally, the current IO-wait accounting on
5475 * blocked on. This means the per CPU IO-wait number is meaningless.
5493 * sched_exec - execve() is a valuable balancing opportunity, because at
5502 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5503 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5525 * and its field curr->exec_start; when called from task_sched_runtime(),
5532 struct sched_entity *curr = (&p->se)->cfs_rq->curr;
5534 struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
5537 prefetch(&curr->exec_start);
5553 * 64-bit doesn't need locks to atomically read a 64-bit value.
5555 * Reading ->on_cpu is racy, but this is ok.
5560 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5563 if (!p->on_cpu || !task_on_rq_queued(p))
5564 return p->se.sum_exec_runtime;
5569 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5576 p->sched_class->update_curr(rq);
5578 ns = p->se.sum_exec_runtime;
5600 if (!rq->last_seen_need_resched_ns) {
5601 rq->last_seen_need_resched_ns = now;
5602 rq->ticks_without_resched = 0;
5606 rq->ticks_without_resched++;
5607 resched_latency = now - rq->last_seen_need_resched_ns;
5653 curr = rq->curr;
5659 curr->sched_class->task_tick(rq, curr, 0);
5673 if (curr->flags & PF_WQ_WORKER)
5677 rq->idle_balance = idle_cpu(cpu);
5689 /* Values for ->state, see diagram below. */
5695 * State diagram for ->state:
5704 * +--TICK_SCHED_REMOTE_OFFLINING
5723 int cpu = twork->cpu;
5731 * statistics and checks timeslices in a time-independent way, regardless
5736 struct task_struct *curr = rq->curr;
5746 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5749 curr->sched_class->task_tick(rq, curr, 0);
5761 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5778 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5781 twork->cpu = cpu;
5782 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5783 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5799 /* There cannot be competing actions, but don't rely on stop-machine. */
5800 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5829 current->preempt_disable_ip = ip;
5850 PREEMPT_MASK - 10);
5897 return p->preempt_disable_ip;
5915 prev->comm, prev->pid, preempt_count());
5933 * Various schedule()-time debugging checks and statistics:
5946 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5947 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5948 prev->comm, prev->pid, prev->non_block_count);
5963 schedstat_inc(this_rq()->sched_count);
5973 * that when we release the rq->lock the task is in the same
5974 * state as before we took rq->lock.
5979 for_class_range(class, prev->sched_class, &idle_sched_class) {
5980 if (class->balance(rq, prev, rf))
5989 * Pick up the highest-prio task:
6003 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6004 rq->nr_running == rq->cfs.h_nr_running)) {
6023 p = class->pick_next_task(rq);
6034 return (task_rq(t)->idle == t);
6039 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6047 return a->core_cookie == b->core_cookie;
6056 p = class->pick_task(rq);
6074 bool core_clock_updated = (rq == rq->core);
 6085  /* Stopper task is switching into idle, no need for core-wide selection. */
6092 rq->core_pick = NULL;
6101 * rq->core_pick can be NULL if no selection was made for a CPU because
6102 * it was either offline or went offline during a sibling's core-wide
6103 * selection. In this case, do a core-wide selection.
6105 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6106 rq->core->core_pick_seq != rq->core_sched_seq &&
6107 rq->core_pick) {
6108 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6110 next = rq->core_pick;
6116 rq->core_pick = NULL;
6123 need_sync = !!rq->core->core_cookie;
6126 rq->core->core_cookie = 0UL;
6127 if (rq->core->core_forceidle_count) {
6129 update_rq_clock(rq->core);
6134 rq->core->core_forceidle_start = 0;
6135 rq->core->core_forceidle_count = 0;
6136 rq->core->core_forceidle_occupation = 0;
6142 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6151 rq->core->core_task_seq++;
6159 if (!next->core_cookie) {
6160 rq->core_pick = NULL;
6175 * Tie-break prio towards the current CPU
6185 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6188 p = rq_i->core_pick = pick_task(rq_i);
6193 cookie = rq->core->core_cookie = max->core_cookie;
6201 p = rq_i->core_pick;
6211 rq_i->core_pick = p;
6213 if (p == rq_i->idle) {
6214 if (rq_i->nr_running) {
6215 rq->core->core_forceidle_count++;
6217 rq->core->core_forceidle_seq++;
6224 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6225 rq->core->core_forceidle_start = rq_clock(rq->core);
6226 rq->core->core_forceidle_occupation = occ;
6229 rq->core->core_pick_seq = rq->core->core_task_seq;
6230 next = rq->core_pick;
6231 rq->core_sched_seq = rq->core->core_pick_seq;
6239 * NOTE: L1TF -- at this point we're no longer running the old task and
6241 * their task. This ensures there is no inter-sibling overlap between
6242 * non-matching user state.
6251 * picked for it. That's Ok - it will pick tasks for itself,
6254 if (!rq_i->core_pick)
6258 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6265 if (!(fi_before && rq->core->core_forceidle_count))
6266 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6268 rq_i->core_pick->core_occupation = occ;
6271 rq_i->core_pick = NULL;
6276 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6278 if (rq_i->curr == rq_i->core_pick) {
6279 rq_i->core_pick = NULL;
6289 if (rq->core->core_forceidle_count && next == rq->idle)
6305 cookie = dst->core->core_cookie;
6309 if (dst->curr != dst->idle)
6317 if (p == src->core_pick || p == src->curr)
6323 if (p->core_occupation > dst->idle->core_occupation)
6395 if (!rq->core->core_cookie)
6398 if (!rq->nr_running) /* not forced idle */
6401 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6405 sched_core_lock(*_T->lock, &_T->flags),
6406 sched_core_unlock(*_T->lock, &_T->flags),
6417 WARN_ON_ONCE(rq->core != rq);
6428 if (rq->core == rq) {
6442 rq->core = core_rq;
6444 WARN_ON_ONCE(rq->core != core_rq);
6458 WARN_ON_ONCE(rq->core != rq);
6463 if (rq->core != rq)
6478 core_rq->core_task_seq = rq->core_task_seq;
6479 core_rq->core_pick_seq = rq->core_pick_seq;
6480 core_rq->core_cookie = rq->core_cookie;
6481 core_rq->core_forceidle_count = rq->core_forceidle_count;
6482 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6483 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6490 core_rq->core_forceidle_start = 0;
6495 rq->core = core_rq;
6503 if (rq->core != rq)
6504 rq->core = rq;
6553 * task to the run-queue and that's it.
6555 * Now, if the new task added to the run-queue preempts the current
6559 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
 6561  *       - in syscall or exception context, at the next outermost
6565 * - in IRQ context, return from interrupt-handler to
6568 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6571 * - cond_resched() call
6572 * - explicit schedule() call
6573 * - return from syscall or exception to user-space
6574 * - return from interrupt-handler to user-space
6589 prev = rq->curr;
6600 * Make sure that signal_pending_state()->signal_pending() below
6607 * LOCK rq->lock LOCK p->pi_state
6609 * if (signal_pending_state()) if (p->state & @state)
6612 * after coming from user-space, before storing to rq->curr.
6618 rq->clock_update_flags <<= 1;
6620 rq->clock_update_flags = RQCF_UPDATED;
6622 switch_count = &prev->nivcsw;
6625 * We must load prev->state once (task_struct::state is volatile), such
6628 prev_state = READ_ONCE(prev->__state);
6631 WRITE_ONCE(prev->__state, TASK_RUNNING);
6633 prev->sched_contributes_to_load =
6638 if (prev->sched_contributes_to_load)
6639 rq->nr_uninterruptible++;
6643 * prev_state = prev->state; if (p->on_rq && ...)
6645 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6646 * p->state = TASK_WAKING
6650 * After this, schedule() must not care about p->state any more.
6654 if (prev->in_iowait) {
6655 atomic_inc(&rq->nr_iowait);
6659 switch_count = &prev->nvcsw;
6666 rq->last_seen_need_resched_ns = 0;
6670 rq->nr_switches++;
6672 * RCU users of rcu_dereference(rq->curr) may not see
6675 RCU_INIT_POINTER(rq->curr, next);
6679 * rq->curr, before returning to user-space.
6683 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6684 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6685 * on PowerPC and on RISC-V.
6686 * - finish_lock_switch() for weakly-ordered
6688 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6714 current->flags |= PF_NOFREEZE;
6719 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6731 task_flags = tsk->flags;
6748 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6754 blk_flush_plug(tsk->plug, true);
6759 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
6760 if (tsk->flags & PF_WQ_WORKER)
6783 * state (have scheduled out non-voluntarily) by making sure that all
6786 * (schedule out non-voluntarily).
6800 WARN_ON_ONCE(current->__state);
6811 * or we have been woken up remotely but the IPI has not yet arrived,
6826 * schedule_preempt_disabled - called with preemption disabled
6880 * This is the entry point to schedule() from in-kernel preemption
6886 * If there is a non-zero preempt_count or interrupts are disabled,
6918 * preempt_schedule_notrace - preempt_schedule called by tracing
7022 return try_to_wake_up(curr->private, mode, wake_flags);
7029 p->sched_class = &dl_sched_class;
7031 p->sched_class = &rt_sched_class;
7033 p->sched_class = &fair_sched_class;
7035 p->prio = prio;
7043 prio = min(prio, pi_task->prio);
7056 * rt_mutex_setprio - set the current priority of a task
7061 * not touch ->normal_prio like __setscheduler().
7074 /* XXX used to be waiter->prio, not waiter->task->prio */
7075 prio = __rt_effective_prio(pi_task, p->normal_prio);
7080 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7086 * Set under pi_lock && rq->lock, such that the value can be used under
7091 * ensure a task is de-boosted (pi_task is set to NULL) before the
7093 * points to a blocked task -- which guarantees the task is present.
7095 p->pi_top_task = pi_task;
7100 if (prio == p->prio && !dl_prio(prio))
7108 * the timer wheel base->lock on the CPU and another CPU wants
7115 if (unlikely(p == rq->idle)) {
7116 WARN_ON(p != rq->curr);
7117 WARN_ON(p->pi_blocked_on);
7122 oldprio = p->prio;
7127 prev_class = p->sched_class;
7137 * 1. -rt task is running and holds mutex A
7138 * --> -dl task blocks on mutex A
7140 * 2. -dl task is running and holds mutex A
7141 * --> -dl task blocks on mutex A and could preempt the
7145 if (!dl_prio(p->normal_prio) ||
7146 (pi_task && dl_prio(pi_task->prio) &&
7147 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7148 p->dl.pi_se = pi_task->dl.pi_se;
7151 p->dl.pi_se = &p->dl;
7155 p->dl.pi_se = &p->dl;
7160 p->dl.pi_se = &p->dl;
7162 p->rt.timeout = 0;
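
/*
 * The user-visible way to exercise the boosting handled by rt_mutex_setprio()
 * above is a priority-inheritance mutex; a minimal sketch using the standard
 * POSIX API (error handling elided, illustrative only).
 */
#include <pthread.h>

static pthread_mutex_t pi_lock;

static void pi_mutex_init(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	/* A low-prio owner gets boosted to the prio of the highest waiter. */
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&pi_lock, &attr);
	pthread_mutexattr_destroy(&attr);
}
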
7208 * allow the 'normal' nice value to be set - but as expected
7213 p->static_prio = NICE_TO_PRIO(nice);
7223 p->static_prio = NICE_TO_PRIO(nice);
7225 old_prio = p->prio;
7226 p->prio = effective_prio(p);
7237 p->sched_class->prio_changed(rq, p, old_prio);
7245 * is_nice_reduction - check if nice value is an actual reduction
7254 /* Convert nice value [19,-20] to rlimit style value [1,40]: */
7261 * can_nice - check if a task can reduce its nice value
7273 * sys_nice - change the priority of the current process.
7288 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
7293 return -EPERM;
7306 * task_prio - return the priority value of a given task.
7313 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19]
7314 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99]
7315 * deadline -101 -1 0
7319 return p->prio - MAX_RT_PRIO;
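
/*
 * Worked example of the table above, using the standard constants
 * (MAX_RT_PRIO == 100, nice 0 maps to kernel prio 120); standalone sketch,
 * not kernel code.
 */
#include <stdio.h>

#define MAX_RT_PRIO	100
#define DEFAULT_PRIO	(MAX_RT_PRIO + 20)		/* 120 */
#define NICE_TO_PRIO(n)	((n) + DEFAULT_PRIO)

int main(void)
{
	/* CFS, nice 0:   kernel prio 120, task_prio() = 120 - 100 = 20 */
	printf("nice 0         -> kprio %d, task_prio %d\n",
	       NICE_TO_PRIO(0), NICE_TO_PRIO(0) - MAX_RT_PRIO);

	/* SCHED_FIFO 50: kernel prio 99 - 50 = 49, task_prio() = -51   */
	printf("fifo rtprio 50 -> kprio %d, task_prio %d\n",
	       MAX_RT_PRIO - 1 - 50, MAX_RT_PRIO - 1 - 50 - MAX_RT_PRIO);
	return 0;
}
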
7323 * idle_cpu - is a given CPU idle currently?
7332 if (rq->curr != rq->idle)
7335 if (rq->nr_running)
7339 if (rq->ttwu_pending)
7347 * available_idle_cpu - is a given CPU idle for enqueuing work.
7364 * idle_task - return the idle task for a given CPU.
7371 return cpu_rq(cpu)->idle;
7379 if (sched_core_enabled(rq) && rq->curr == rq->idle)
7400 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
7401 * which excludes things like IRQ and steal-time. These latter are then accrued
7418 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
7424 * because of inaccuracies in how we track these -- see
7456 * saturation when we should -- something for later.
 7473  *   U' = irq + ((max - irq) / max) * U
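
/*
 * Worked example of the scaling above, standalone sketch (not kernel code):
 * with max = 1024, irq = 128 and U = 512,
 * U' = 128 + (1024 - 128) * 512 / 1024 = 128 + 448 = 576.
 */
static unsigned long scale_irq(unsigned long util, unsigned long irq,
			       unsigned long max)
{
	util *= (max - irq);
	util /= max;
	return util + irq;
}
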
7503 * find_process_by_pid - find a process with a matching PID value.
7514 * sched_setparam() passes in -1 for its policy, to let the functions
7517 #define SETPARAM_POLICY -1
7522 int policy = attr->sched_policy;
7525 policy = p->policy;
7527 p->policy = policy;
7532 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
7534 /* rt-policy tasks do not have a timerslack */
7536 p->timer_slack_ns = 0;
7537 } else if (p->timer_slack_ns == 0) {
7538 /* when switching back to non-rt policy, restore timerslack */
7539 p->timer_slack_ns = p->default_timer_slack_ns;
7543 * __sched_setscheduler() ensures attr->sched_priority == 0 when
7547 p->rt_priority = attr->sched_priority;
7548 p->normal_prio = normal_prio(p);
7562 match = (uid_eq(cred->euid, pcred->euid) ||
7563 uid_eq(cred->euid, pcred->uid));
7571 * event on permitted non-privileged operations:
7578 if (attr->sched_nice < task_nice(p) &&
7579 !is_nice_reduction(p, attr->sched_nice))
7587 if (policy != p->policy && !rlim_rtprio)
7591 if (attr->sched_priority > p->rt_priority &&
7592 attr->sched_priority > rlim_rtprio)
7619 if (p->sched_reset_on_fork && !reset_on_fork)
7626 return -EPERM;
7635 int oldpolicy = -1, policy = attr->sched_policy;
7650 reset_on_fork = p->sched_reset_on_fork;
7651 policy = oldpolicy = p->policy;
7653 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
7656 return -EINVAL;
7659 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7660 return -EINVAL;
7664 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
7667 if (attr->sched_priority > MAX_RT_PRIO-1)
7668 return -EINVAL;
7670 (rt_policy(policy) != (attr->sched_priority != 0)))
7671 return -EINVAL;
7678 if (attr->sched_flags & SCHED_FLAG_SUGOV)
7679 return -EINVAL;
7687 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
7697 if (dl_policy(policy) || dl_policy(p->policy)) {
7703 * Make sure no PI-waiters arrive (or leave) while we are
7706 * To be able to change p->policy safely, the appropriate
7715 if (p == rq->stop) {
7716 retval = -EINVAL;
7724 if (unlikely(policy == p->policy)) {
7725 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
7727 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
7731 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
7734 p->sched_reset_on_fork = reset_on_fork;
7747 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
7749 retval = -EPERM;
7755 !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
7756 cpumask_t *span = rq->rd->span;
7763 if (!cpumask_subset(span, p->cpus_ptr) ||
7764 rq->rd->dl_bw.bw == 0) {
7765 retval = -EPERM;
7772 /* Re-check policy now with rq lock held: */
7773 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
7774 policy = oldpolicy = -1;
7787 retval = -EBUSY;
7791 p->sched_reset_on_fork = reset_on_fork;
7792 oldprio = p->prio;
7794 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
7815 prev_class = p->sched_class;
7817 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
7828 if (oldprio < p->prio)
7867 .sched_priority = param->sched_priority,
7868 .sched_nice = PRIO_TO_NICE(p->static_prio),
7881 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
7910 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
7981 return -EINVAL;
7983 return -EFAULT;
7986 retval = -ESRCH;
8011 ret = get_user(size, &uattr->size);
8023 if (ret == -E2BIG)
8028 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
8030 return -EINVAL;
8034 * to be strict and return an error on out-of-bounds values?
8036 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
8041 put_user(sizeof(*attr), &uattr->size);
8042 return -E2BIG;
8050 attr->sched_priority = p->rt_priority;
8052 attr->sched_nice = task_nice(p);
8056 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
8066 return -EINVAL;
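
/*
 * Minimal user-space use of this interface through the POSIX wrapper: switch
 * the caller to SCHED_FIFO priority 10 (requires CAP_SYS_NICE or a suitable
 * RLIMIT_RTPRIO).  Illustrative only.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp))
		perror("sched_setscheduler");
	else
		printf("policy is now %d\n", sched_getscheduler(0));
	return 0;
}
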
8072 * sys_sched_setparam - set/change the RT priority of a thread
8084 * sys_sched_setattr - same as above, but with extended sched_attr
8097 return -EINVAL;
8104 return -EINVAL;
8109 retval = -ESRCH;
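
/*
 * Illustration (user-space sketch, not part of this file): sched_setattr()
 * has no stable glibc wrapper, so it is usually invoked via syscall(2).
 * The struct below is a local mirror of the UAPI sched_attr layout
 * (SCHED_ATTR_SIZE_VER0, 48 bytes); the utilization-clamp fields of newer
 * kernels are omitted, which the size handshake tolerates.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE	6	/* UAPI policy value from <linux/sched.h> */

struct sched_attr_v0 {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr_v0 attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10ms ... */
		.sched_deadline	= 30 * 1000 * 1000,	/* ... within 30ms ... */
		.sched_period	= 30 * 1000 * 1000,	/* ... every 30ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}
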
8126 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
8138 return -EINVAL;
8140 retval = -ESRCH;
8146 retval = p->policy
8147 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
8154 * sys_sched_getparam - get the RT priority of a thread
8168 return -EINVAL;
8172 retval = -ESRCH;
8181 lp.sched_priority = p->rt_priority;
8187 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
8198 * than what user-space knows about) to user-space.
8200 * Note that all cases are valid: user-space buffer can be larger or
8201 * smaller than the kernel-space buffer. The usual case is that both
8212 return -EFAULT;
8217 * If usize == ksize then we just copy everything to user-space and all is good.
8219 * If usize < ksize then we only copy as much as user-space has space for,
8222 * If usize > ksize then user-space is using a newer version of the ABI,
8223 * which part the kernel doesn't know about. Just ignore it - tooling can
8224 * detect the kernel's knowledge of attributes from the attr->size value
8227 kattr->size = min(usize, ksize);
8229 if (copy_to_user(uattr, kattr, kattr->size))
8230 return -EFAULT;
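
/*
 * Illustration (user-space sketch, not part of this file): because of the
 * min(usize, ksize) handling above, a caller may pass a buffer larger than
 * the kernel's sched_attr and learn the kernel's idea of the structure size
 * from the size field it writes back.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* Deliberately larger than any sched_attr this kernel knows about. */
	uint8_t buf[256] = { 0 };
	uint32_t ksize;

	if (syscall(SYS_sched_getattr, 0, buf, (unsigned int)sizeof(buf), 0)) {
		perror("sched_getattr");
		return 1;
	}

	/* The leading u32 is kattr->size, i.e. min(usize, ksize). */
	memcpy(&ksize, buf, sizeof(ksize));
	printf("kernel filled in %u bytes of sched_attr\n", ksize);
	return 0;
}
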
8236 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
8251 return -EINVAL;
8255 retval = -ESRCH;
8263 kattr.sched_policy = p->policy;
8264 if (p->sched_reset_on_fork)
8275 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
8276 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
8302 * if admission test is enabled, we only admit -deadline
8307 if (!cpumask_subset(task_rq(p)->rd->span, mask))
8308 ret = -EBUSY;
8321 return -ENOMEM;
8324 retval = -ENOMEM;
8329 cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
8331 ctx->new_mask = new_mask;
8332 ctx->flags |= SCA_CHECK;
8358 if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
8360 ctx->user_mask);
8366 retval = -EINVAL;
8388 return -ESRCH;
8395 if (p->flags & PF_NO_SETAFFINITY) {
8396 retval = -EINVAL;
8402 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
8404 retval = -EPERM;
8415 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
8422 retval = -ENOMEM;
8448 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
8452 * sys_sched_setaffinity - set the CPU affinity of a process
8455 * @user_mask_ptr: user-space pointer to the new CPU mask
8466 return -ENOMEM;
8483 retval = -ESRCH;
8492 raw_spin_lock_irqsave(&p->pi_lock, flags);
8493 cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
8494 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
8503 * sys_sched_getaffinity - get the CPU affinity of a process
8506 * @user_mask_ptr: user-space pointer to hold the current CPU mask
8518 return -EINVAL;
8519 if (len & (sizeof(unsigned long)-1))
8520 return -EINVAL;
8523 return -ENOMEM;
8530 ret = -EFAULT;
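
/*
 * Illustration (user-space sketch, not part of this file): the affinity
 * syscalls above are wrapped by glibc's sched_setaffinity() and
 * sched_getaffinity(); the byte length they take is what the len checks
 * above validate.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* pin the calling thread to CPU 0 */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");

	CPU_ZERO(&set);
	if (sched_getaffinity(0, sizeof(set), &set) == 0)
		printf("runnable on %d CPU(s)\n", CPU_COUNT(&set));
	return 0;
}
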
8546 schedstat_inc(rq->yld_count);
8547 current->sched_class->yield_task(rq);
8557 * sys_sched_yield - yield the current processor to other threads.
8578 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
8579 * whether the current CPU is in an RCU read-side critical section,
8581 * in kernel context. In contrast, in non-preemptible kernels,
8582 * RCU readers leave no in-memory hints, which means that CPU-bound
8630 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
8633 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
8694 #include <linux/entry-common.h>
8706 * cond_resched <- __cond_resched
8707 * might_resched <- RET0
8708 * preempt_schedule <- NOP
8709 * preempt_schedule_notrace <- NOP
8710 * irqentry_exit_cond_resched <- NOP
8713 * cond_resched <- __cond_resched
8714 * might_resched <- __cond_resched
8715 * preempt_schedule <- NOP
8716 * preempt_schedule_notrace <- NOP
8717 * irqentry_exit_cond_resched <- NOP
8720 * cond_resched <- RET0
8721 * might_resched <- RET0
8722 * preempt_schedule <- preempt_schedule
8723 * preempt_schedule_notrace <- preempt_schedule_notrace
8724 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
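
/*
 * Illustration (user-space sketch, not part of this file): on a kernel
 * built with CONFIG_PREEMPT_DYNAMIC the mapping above can be switched at
 * runtime via debugfs (the path below assumes debugfs is mounted at
 * /sys/kernel/debug), or chosen at boot with the preempt= parameter.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/sched/preempt";
	char line[128] = "";
	FILE *f = fopen(path, "r+");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("preemption modes (current in parentheses): %s", line);

	/* Select the "voluntary" column of the table above. */
	rewind(f);
	fputs("voluntary", f);
	fclose(f);
	return 0;
}
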
8728 preempt_dynamic_undefined = -1,
8747 return -EINVAL;
8766 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
8899 * yield - yield the current processor to other threads.
8928 * yield_to - yield the current processor to another thread in
8940 * -ESRCH if there's no task to yield to.
8958 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
8959 yielded = -ESRCH;
8969 if (!curr->sched_class->yield_to_task)
8972 if (curr->sched_class != p->sched_class)
8978 yielded = curr->sched_class->yield_to_task(rq, p);
8980 schedstat_inc(rq->yld_count);
9003 int old_iowait = current->in_iowait;
9005 current->in_iowait = 1;
9006 blk_flush_plug(current->plug, true);
9012 current->in_iowait = token;
9016 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
9043 * sys_sched_get_priority_max - return maximum RT priority.
9052 int ret = -EINVAL;
9057 ret = MAX_RT_PRIO-1;
9070 * sys_sched_get_priority_min - return minimum RT priority.
9079 int ret = -EINVAL;
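
/*
 * Illustration (user-space sketch, not part of this file): the range
 * reported by the two syscalls above is 1..MAX_RT_PRIO-1 (1..99) for the
 * RT policies and 0..0 for the fair ones.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	printf("SCHED_FIFO  priority range: %d..%d\n",
	       sched_get_priority_min(SCHED_FIFO),
	       sched_get_priority_max(SCHED_FIFO));
	printf("SCHED_OTHER priority range: %d..%d\n",
	       sched_get_priority_min(SCHED_OTHER),
	       sched_get_priority_max(SCHED_OTHER));
	return 0;
}
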
9104 return -EINVAL;
9106 retval = -ESRCH;
9118 if (p->sched_class->get_rr_interval)
9119 time_slice = p->sched_class->get_rr_interval(rq, p);
9132 * sys_sched_rr_get_interval - return the default timeslice of a process.
9137 * into the user-space timespec buffer. A value of '0' means infinity.
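
/*
 * Illustration (user-space sketch, not part of this file): the timeslice
 * computed above is what sched_rr_get_interval() returns; classes without
 * a fixed slice report 0, which means "infinity" as noted in the comment.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
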
9175 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
9185 ppid = task_pid_nr(rcu_dereference(p->real_parent));
9187 pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
9201 unsigned int state = READ_ONCE(p->__state);
9229 * reset the NMI-timeout, listing all files on a slow
9254 * init_idle - set up an idle thread for a given CPU
9272 raw_spin_lock_irqsave(&idle->pi_lock, flags);
9275 idle->__state = TASK_RUNNING;
9276 idle->se.exec_start = sched_clock();
9279 * look like a proper per-CPU kthread.
9281 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
9293 * holding rq->lock, the CPU isn't yet set to this CPU so the
9297 * use task_rq_lock() here and obtain the other rq->lock.
9305 rq->idle = idle;
9306 rcu_assign_pointer(rq->curr, idle);
9307 idle->on_rq = TASK_ON_RQ_QUEUED;
9309 idle->on_cpu = 1;
9312 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
9320 idle->sched_class = &idle_sched_class;
9324 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
9356 if (p->flags & PF_NO_SETAFFINITY)
9357 ret = -EINVAL;
9374 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
9375 return -EINVAL;
9402 p->numa_preferred_nid = nid;
9419 struct mm_struct *mm = current->active_mm;
9422 BUG_ON(current != this_rq()->idle);
9439 raw_spin_lock_irq(&p->pi_lock);
9445 cpu = select_fallback_rq(rq->cpu, p);
9450 raw_spin_unlock_irq(&p->pi_lock);
9460 * Ensure we only run per-cpu kthreads once the CPU goes !active.
9467 struct task_struct *push_task = rq->curr;
9474 rq->balance_callback = &balance_push_callback;
9480 if (!cpu_dying(rq->cpu) || rq != this_rq())
9484 * Both the cpu-hotplug and stop task are in this case and are
9501 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
9502 rcuwait_active(&rq->hotplug_wait)) {
9504 rcuwait_wake_up(&rq->hotplug_wait);
9512 * Temporarily drop rq->lock such that we can wake-up the stop task.
9517 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
9535 WARN_ON_ONCE(rq->balance_callback);
9536 rq->balance_callback = &balance_push_callback;
9537 } else if (rq->balance_callback == &balance_push_callback) {
9538 rq->balance_callback = NULL;
9553 rcuwait_wait_event(&rq->hotplug_wait,
9554 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
9576 if (!rq->online) {
9579 cpumask_set_cpu(rq->cpu, rq->rd->online);
9580 rq->online = 1;
9583 if (class->rq_online)
9584 class->rq_online(rq);
9591 if (rq->online) {
9596 if (class->rq_offline)
9597 class->rq_offline(rq);
9600 cpumask_clear_cpu(rq->cpu, rq->rd->online);
9601 rq->online = 0;
9610 if (rq->rd) {
9611 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9622 if (rq->rd) {
9623 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9652 if (--num_cpus_frozen)
9754 * preempt-disabled and RCU users of this state to go away such that
9796 rq->calc_load_update = calc_load_update;
9834 * Also see the comment "Global load-average calculations".
9851 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
9859 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
9872 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
9899 /* Move init over to a non-isolated CPU */
9902 current->flags &= ~PF_NO_SETAFFINITY;
10012 raw_spin_lock_init(&rq->__lock);
10013 rq->nr_running = 0;
10014 rq->calc_load_active = 0;
10015 rq->calc_load_update = jiffies + LOAD_FREQ;
10016 init_cfs_rq(&rq->cfs);
10017 init_rt_rq(&rq->rt);
10018 init_dl_rq(&rq->dl);
10020 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
10021 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
10025	 * In case of task-groups formed through the cgroup filesystem, it
10028 * root_task_group and its child task-groups in a fair manner,
10029 * based on each entity's (task or task-group's) weight
10030 * (se->load.weight).
10039 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
10041 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
10044 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
10046 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
10049 rq->sd = NULL;
10050 rq->rd = NULL;
10051 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
10052 rq->balance_callback = &balance_push_callback;
10053 rq->active_balance = 0;
10054 rq->next_balance = jiffies;
10055 rq->push_cpu = 0;
10056 rq->cpu = i;
10057 rq->online = 0;
10058 rq->idle_stamp = 0;
10059 rq->avg_idle = 2*sysctl_sched_migration_cost;
10060 rq->wake_stamp = jiffies;
10061 rq->wake_avg_idle = rq->avg_idle;
10062 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
10064 INIT_LIST_HEAD(&rq->cfs_tasks);
10068 rq->last_blocked_load_update_tick = jiffies;
10069 atomic_set(&rq->nohz_flags, 0);
10071 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
10074 rcuwait_init(&rq->hotplug_wait);
10078 atomic_set(&rq->nr_iowait, 0);
10081 rq->core = rq;
10082 rq->core_pick = NULL;
10083 rq->core_enabled = 0;
10084 rq->core_tree = RB_ROOT;
10085 rq->core_forceidle_count = 0;
10086 rq->core_forceidle_occupation = 0;
10087 rq->core_forceidle_start = 0;
10089 rq->core_cookie = 0UL;
10091 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
10104 * is dressed up as a per-CPU kthread and thus needs to play the part
10105 * if we want to avoid special-casing it in code that deals with per-CPU
10142 * Blocking primitives will set (and therefore destroy) current->state,
10146 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
10149 (void *)current->task_state_change,
10150 (void *)current->task_state_change);
10188 !is_idle_task(current) && !current->non_block_count) ||
10203 in_atomic(), irqs_disabled(), current->non_block_count,
10204 current->pid, current->comm);
10248 current->pid, current->comm);
10280 current->pid, current->comm);
10303 if (p->flags & PF_KTHREAD)
10306 p->se.exec_start = 0;
10307 schedstat_set(p->stats.wait_start, 0);
10308 schedstat_set(p->stats.sleep_start, 0);
10309 schedstat_set(p->stats.block_start, 0);
10333 * stopped - every CPU needs to be quiescent, and no scheduling
10340 * curr_task - return the current task for a given CPU.
10356 * ia64_set_curr_task - set the current task for a given CPU.
10360 * Description: This function must only be used when non-maskable interrupts
10362 * notion of the current task on a CPU in a non-blocking manner. This function
10366 * re-starting the system.
10388 uclamp_se_set(&tg->uclamp_req[clamp_id],
10390 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
10416 call_rcu(&tg->rcu, sched_free_group_rcu);
10426 return ERR_PTR(-ENOMEM);
10440 return ERR_PTR(-ENOMEM);
10448 list_add_rcu(&tg->list, &task_groups);
10453 tg->parent = parent;
10454 INIT_LIST_HEAD(&tg->children);
10455 list_add_rcu(&tg->siblings, &parent->children);
10471 call_rcu(&tg->rcu, sched_unregister_group_rcu);
10492 list_del_rcu(&tg->list);
10493 list_del_rcu(&tg->siblings);
10515 tsk->sched_task_group = group;
10518 if (tsk->sched_class->task_change_group)
10519 tsk->sched_class->task_change_group(tsk);
10529 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10546 if (group == tsk->sched_task_group)
10595 return ERR_PTR(-ENOMEM);
10597 return &tg->css;
10604 struct task_group *parent = css_tg(css->parent);
10646 return -EINVAL;
10675 uc_parent = css_tg(css)->parent
10676 ? css_tg(css)->parent->uclamp : NULL;
10680 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
10692 uc_se = css_tg(css)->uclamp;
10742 req.ret = -ERANGE;
10770 if (tg->uclamp_req[clamp_id].value != req.util)
10771 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
10777 tg->uclamp_pct[clamp_id] = req.percent;
10812 util_clamp = tg->uclamp_req[clamp_id].value;
10820 percent = tg->uclamp_pct[clamp_id];
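
/*
 * Illustration (user-space sketch, not part of this file): the percentage
 * parsed and printed above comes from the cgroup v2 cpu.uclamp.min/max
 * files. The path is only an example; it assumes cgroup2 is mounted at
 * /sys/fs/cgroup and the cpu controller is enabled for "mygroup".
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/mygroup/cpu.uclamp.max", "w");

	if (!f) {
		perror("cpu.uclamp.max");
		return 1;
	}
	/* Clamp this group's requested utilization to 50% of CPU capacity. */
	fprintf(f, "50\n");
	fclose(f);
	return 0;
}
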
10852 return (u64) scale_load_down(tg->shares);
10869 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10872 return -EINVAL;
10880 return -EINVAL;
10888 return -EINVAL;
10894 return -EINVAL;
10898 return -EINVAL;
10901 * Prevent race between setting of cfs_rq->runtime_enabled and
10912 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
10914 * If we need to toggle cfs_bandwidth_used, off->on must occur
10915 * before making related changes, and on->off must occur afterwards
10920 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
10921 cfs_b->period = ns_to_ktime(period);
10922 cfs_b->quota = quota;
10923 cfs_b->burst = burst;
10936 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
10937 struct rq *rq = cfs_rq->rq;
10940 cfs_rq->runtime_enabled = runtime_enabled;
10941 cfs_rq->runtime_remaining = 0;
10943 if (cfs_rq->throttled)
10957 period = ktime_to_ns(tg->cfs_bandwidth.period);
10958 burst = tg->cfs_bandwidth.burst;
10964 return -EINVAL;
10973 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
10974 return -1;
10976 quota_us = tg->cfs_bandwidth.quota;
10987 return -EINVAL;
10990 quota = tg->cfs_bandwidth.quota;
10991 burst = tg->cfs_bandwidth.burst;
11000 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
11011 return -EINVAL;
11014 period = ktime_to_ns(tg->cfs_bandwidth.period);
11015 quota = tg->cfs_bandwidth.quota;
11024 burst_us = tg->cfs_bandwidth.burst;
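
/*
 * Illustration (user-space sketch, not part of this file): the quota,
 * period and burst validated above are configured through the cgroup
 * interface; on cgroup v2 that is cpu.max ("$QUOTA $PERIOD" in
 * microseconds) and cpu.max.burst, and the resulting throttling shows up
 * in the same group's cpu.stat counters printed further below. The path
 * assumes cgroup2 mounted at /sys/fs/cgroup with the cpu controller
 * enabled for "mygroup".
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* Allow 50ms of runtime every 100ms period, i.e. half a CPU. */
	write_str("/sys/fs/cgroup/mygroup/cpu.max", "50000 100000");
	/* Permit bursting with up to 25ms of unused quota. */
	write_str("/sys/fs/cgroup/mygroup/cpu.max.burst", "25000");
	return 0;
}
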
11080 if (tg == d->tg) {
11081 period = d->period;
11082 quota = d->quota;
11089 if (quota == RUNTIME_INF || quota == -1)
11098 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11099 s64 quota = 0, parent_quota = -1;
11101 if (!tg->parent) {
11104 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
11107 parent_quota = parent_b->hierarchical_quota;
11111 * always take the non-RUNTIME_INF min. On cgroup1, only
11125 return -EINVAL;
11128 cfs_b->hierarchical_quota = quota;
11157 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11159 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
11160 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
11161 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
11169 stats = __schedstats_from_se(tg->se[i]);
11170 ws += schedstat_val(stats->wait_sum);
11176 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
11177 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
11188 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
11235 return css_tg(css)->idle;
11318 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11321 throttled_usec = cfs_b->throttled_time;
11323 burst_usec = cfs_b->burst_time;
11331 cfs_b->nr_periods, cfs_b->nr_throttled,
11332 throttled_usec, cfs_b->nr_burst, burst_usec);
11361 u64 weight = scale_load_down(tg->shares);
11373 * value used by scheduler and the round-trip conversions preserve
11377 return -ERANGE;
11387 unsigned long weight = scale_load_down(css_tg(css)->shares);
11393 delta = abs(sched_prio_to_weight[prio] - weight);
11399 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
11409 return -ERANGE;
11411 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
11437 return -EINVAL;
11446 return -EINVAL;
11465 u64 burst = tg->cfs_bandwidth.burst;
11568 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
11569 * nice 1, it will get ~10% less CPU time than another CPU-bound task
11573 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
11579 /* -20 */ 88761, 71755, 56483, 46273, 36291,
11580 /* -15 */ 29154, 23254, 18705, 14949, 11916,
11581 /* -10 */ 9548, 7620, 6100, 4904, 3906,
11582 /* -5 */ 3121, 2501, 1991, 1586, 1277,
11597 /* -20 */ 48388, 59856, 76040, 92818, 118348,
11598 /* -15 */ 147320, 184698, 229616, 287308, 360437,
11599 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
11600 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
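
/*
 * Illustration (not part of this file): a worked example of the ~10% rule
 * described above, using rows of the full weight table that are elided
 * from this excerpt (nice 0 = 1024, nice 5 = 335). With one CPU-bound task
 * at each of those nice levels sharing a CPU, time is split in proportion
 * to weight: roughly 75% vs 25%, i.e. about the 1.25^5 ratio.
 */
#include <stdio.h>

int main(void)
{
	const int w_nice0 = 1024;	/* sched_prio_to_weight[20] */
	const int w_nice5 = 335;	/* sched_prio_to_weight[25] */
	double total = w_nice0 + w_nice5;

	printf("nice 0 share: %.1f%%\n", 100.0 * w_nice0 / total);
	printf("nice 5 share: %.1f%%\n", 100.0 * w_nice5 / total);
	return 0;
}
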
11615 * @cid_lock: Guarantee forward-progress of cid allocation.
11617 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
11618 * is only used when contention is detected by the lock-free allocation so
11624 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
11626 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
11635 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
11641 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
11658 * per-mm/cpu cid value.
11660 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
11661 * task->mm != mm for the rest of the discussion. There are two scheduler state
11664 * (TSA) Store to rq->curr with transition from (N) to (Y)
11666 * (TSB) Store to rq->curr with transition from (Y) to (N)
11668 * On the remote-clear side, there is one transition we care about:
11673 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
11687 * Context switch CS-1 Remote-clear
11688 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
11690 * - switch_mm_cid()
11691 * - memory barrier (see switch_mm_cid()
11695 * - mm_cid_get (next)
11696 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
11703 * still an active task on the cpu. Remote-clear will therefore not transition
11724 t->migrate_from_cpu = task_cpu(t);
11732 struct mm_struct *mm = t->mm;
11737 return -1;
11739 last_mm_cid = t->last_mm_cid;
11745 if (last_mm_cid == -1)
11746 return -1;
11747 src_cid = READ_ONCE(src_pcpu_cid->cid);
11749 return -1;
11757 src_task = rcu_dereference(src_rq->curr);
11758 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11760 t->last_mm_cid = -1;
11761 return -1;
11775 struct mm_struct *mm = t->mm;
11778 if (src_cid == -1)
11779 return -1;
11786 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
11787 return -1;
11790 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11791 * rq->curr->mm matches the scheduler barrier in context_switch()
11792 * between store to rq->curr and load of prev and next task's
11793 * per-mm/cpu cid.
11795 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11796 * rq->curr->mm_cid_active matches the barrier in
11798 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11799 * load of per-mm/cpu cid.
11804 * the lazy-put flag, this task will be responsible for transitioning
11805 * from lazy-put flag set to MM_CID_UNSET.
11808 src_task = rcu_dereference(src_rq->curr);
11809 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11815 t->last_mm_cid = -1;
11816 return -1;
11823 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11824 return -1;
11836 struct mm_struct *mm = t->mm;
11844 src_cpu = t->migrate_from_cpu;
11845 if (src_cpu == -1) {
11846 t->last_mm_cid = -1;
11859 * greater or equal to the number of allowed cpus, because user-space
11863 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
11864 dst_cid = READ_ONCE(dst_pcpu_cid->cid);
11866 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
11868 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
11871 if (src_cid == -1)
11875 if (src_cid == -1)
11883 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
11894 cid = READ_ONCE(pcpu_cid->cid);
11905 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
11909 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11910 * rq->curr->mm matches the scheduler barrier in context_switch()
11911 * between store to rq->curr and load of prev and next task's
11912 * per-mm/cpu cid.
11914 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11915 * rq->curr->mm_cid_active matches the barrier in
11917 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11918 * load of per-mm/cpu cid.
11923 * the lazy-put flag, that task will be responsible for transitioning
11924 * from lazy-put flag set to MM_CID_UNSET.
11927 t = rcu_dereference(rq->curr);
11928 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) {
11940 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11953 * rq->clock load is racy on 32-bit but one spurious clear once in a
11956 rq_clock = READ_ONCE(rq->clock);
11957 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11965 curr = rcu_dereference(rq->curr);
11966 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
11967 WRITE_ONCE(pcpu_cid->time, rq_clock);
11973 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
11984 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11985 cid = READ_ONCE(pcpu_cid->cid);
12001 work->next = work; /* Prevent double-add */
12002 if (t->flags & PF_EXITING)
12004 mm = t->mm;
12007 old_scan = READ_ONCE(mm->mm_cid_next_scan);
12012 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
12020 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
12037 struct mm_struct *mm = t->mm;
12041 mm_users = atomic_read(&mm->mm_users);
12043 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
12045 t->cid_work.next = &t->cid_work; /* Protect against double add */
12046 init_task_work(&t->cid_work, task_mm_cid_work);
12051 struct callback_head *work = &curr->cid_work;
12054 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
12055 work->next != work)
12057 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
12066 struct mm_struct *mm = t->mm;
12077 WRITE_ONCE(t->mm_cid_active, 0);
12079 * Store t->mm_cid_active before loading per-mm/cpu cid.
12084 t->last_mm_cid = t->mm_cid = -1;
12090 struct mm_struct *mm = t->mm;
12101 WRITE_ONCE(t->mm_cid_active, 0);
12103 * Store t->mm_cid_active before loading per-mm/cpu cid.
12108 t->last_mm_cid = t->mm_cid = -1;
12114 struct mm_struct *mm = t->mm;
12125 WRITE_ONCE(t->mm_cid_active, 1);
12127 * Store t->mm_cid_active before loading per-mm/cpu cid.
12131 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
12138 WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
12139 t->mm_cid_active = 1;