Lines Matching +full:no +full:-idle +full:-on +full:-init (kernel/context_tracking.c)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Context tracking: Probe on high level context boundaries such as kernel,
4 * userspace, guest or idle.
6 * This is used by RCU to remove its dependency on the timer tick while a CPU
7 * runs in idle, userspace or guest mode.
13 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
41 /* Record the current task on dyntick-idle entry. */
45 WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); in rcu_dynticks_task_enter()
49 /* Record no current task on dyntick-idle exit. */
53 WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); in rcu_dynticks_task_exit()
57 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
62 current->trc_reader_special.b.need_mb = true; in rcu_dynticks_task_trace_enter()
66 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
71 current->trc_reader_special.b.need_mb = false; in rcu_dynticks_task_trace_exit()
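
Taken together, the four helpers above are per-task bookkeeping run at the idle/user boundary: one pair records which CPU the task is idling on and clears the field to -1 on exit, the other flips the flag that switches RCU Tasks Trace readers into their heavyweight mode, in which they take full memory barriers. A minimal userspace model of that pattern, assuming made-up struct, field and function names (only the field roles come from the fragments above):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative per-task state; models rcu_tasks_idle_cpu and need_mb. */
struct task {
	int idle_cpu;			/* -1 while not on the dyntick-idle path */
	atomic_bool need_mb;		/* true: readers must use full barriers */
	int reader_nesting;
};

/* Run at dyntick-idle/user entry, mirroring the helpers above. */
static void task_eqs_enter(struct task *t, int cpu)
{
	t->idle_cpu = cpu;			/* rcu_dynticks_task_enter() */
	atomic_store(&t->need_mb, true);	/* rcu_dynticks_task_trace_enter() */
}

/* Run at dyntick-idle/user exit. */
static void task_eqs_exit(struct task *t)
{
	atomic_store(&t->need_mb, false);	/* rcu_dynticks_task_trace_exit() */
	t->idle_cpu = -1;			/* rcu_dynticks_task_exit() */
}

/* Stand-in reader: heavyweight mode orders the nesting update with a full
 * fence, which is what "heavyweight" buys the grace-period machinery. */
static void trace_reader_enter(struct task *t)
{
	t->reader_nesting++;
	if (atomic_load(&t->need_mb))
		atomic_thread_fence(memory_order_seq_cst);
}

int main(void)
{
	struct task t = { .idle_cpu = -1 };

	task_eqs_enter(&t, 3);
	trace_reader_enter(&t);		/* takes the heavyweight path */
	task_eqs_exit(&t);
	printf("idle_cpu=%d\n", t.idle_cpu);
	return 0;
}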
78 * RCU is watching prior to the call to this function and is no longer
86 * CPUs seeing atomic_add_return() must see prior RCU read-side in ct_kernel_exit_state()
88 * next idle sojourn. in ct_kernel_exit_state()
90 rcu_dynticks_task_trace_enter(); // Before ->dynticks update! in ct_kernel_exit_state()
92 // RCU is no longer watching. Better be in extended quiescent state! in ct_kernel_exit_state()
106 * CPUs seeing atomic_add_return() must see prior idle sojourns, in ct_kernel_enter_state()
107 * and we also must force ordering with the next RCU read-side in ct_kernel_enter_state()
112 rcu_dynticks_task_trace_exit(); // After ->dynticks update! in ct_kernel_enter_state()
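
Both *_state() helpers perform the actual "RCU stops/starts watching" transition with a fully ordered atomic read-modify-write on ct->state, bracketed by the task-trace hooks above, so that any CPU observing the new counter value is also guaranteed to observe this CPU's earlier memory accesses. A toy C11 model of that counter discipline; the odd/even encoding below is purely illustrative and the kernel's ct->state layout differs:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One word per CPU.  In this toy encoding an odd count means the CPU is in
 * an extended quiescent state (RCU not watching). */
struct ct_cpu {
	atomic_uint dynticks;
};

static unsigned int eqs_flip(struct ct_cpu *ct)
{
	/* Fully ordered RMW (seq_cst): a CPU that later observes the new
	 * value is guaranteed to also observe every memory access this CPU
	 * performed before the transition. */
	return atomic_fetch_add(&ct->dynticks, 1) + 1;
}

static void eqs_enter(struct ct_cpu *ct) { assert(eqs_flip(ct) & 1); }
static void eqs_exit(struct ct_cpu *ct)  { assert(!(eqs_flip(ct) & 1)); }

/* What a remote CPU can do with such a counter: snapshot it, then later tell
 * whether the target was in, or passed through, a quiescent state. */
static bool in_eqs(unsigned int snap)	{ return snap & 1; }

int main(void)
{
	struct ct_cpu ct = { 0 };
	unsigned int snap;

	eqs_enter(&ct);
	snap = atomic_load(&ct.dynticks);
	printf("in_eqs(snap)=%d\n", in_eqs(snap));
	eqs_exit(&ct);
	printf("changed since snapshot: %d\n", atomic_load(&ct.dynticks) != snap);
	return 0;
}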
118 * idle loop or adaptive-tickless usermode execution.
120 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
129 WRITE_ONCE(ct->dynticks_nmi_nesting, 0); in ct_kernel_exit()
134 ct->dynticks_nesting--; in ct_kernel_exit()
145 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_exit()
148 WRITE_ONCE(ct->dynticks_nesting, 0); /* Avoid irq-access tearing. */ in ct_kernel_exit()
151 // ... but is no longer watching here. in ct_kernel_exit()
157 * idle loop or adaptive-tickless usermode execution.
159 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
173 ct->dynticks_nesting++; in ct_kernel_enter()
183 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_kernel_enter()
187 WRITE_ONCE(ct->dynticks_nesting, 1); in ct_kernel_enter()
189 WRITE_ONCE(ct->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); in ct_kernel_enter()
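
The ct_kernel_exit()/ct_kernel_enter() fragments above implement the process-level nesting rule: only the outermost exit really enters the extended quiescent state, after crowbarring ->dynticks_nmi_nesting to zero, and only the outermost enter leaves it, re-arming ->dynticks_nmi_nesting with the DYNTICK_IRQ_NONIDLE sentinel. A compact userspace sketch of that bookkeeping, with the EQS transition reduced to a flag; the sentinel's value here is illustrative:

#include <limits.h>
#include <stdio.h>

/* Illustrative per-CPU bookkeeping; the field names follow the fragments
 * above, the rest of this sketch is not kernel code. */
struct ct_cpu {
	long dynticks_nesting;		/* process-level kernel-entry nesting */
	long dynticks_nmi_nesting;	/* irq/NMI nesting, see ct_nmi_*() below */
	int  rcu_watching;		/* stand-in for the real EQS state */
};

/* Large "process context, definitely not idle" sentinel; the exact value
 * used by the kernel is beside the point here. */
#define DYNTICK_IRQ_NONIDLE	(LONG_MAX / 2 + 1)

static void kernel_exit(struct ct_cpu *ct)
{
	/* Crowbar irq/NMI nesting: no interrupt can be pending across this. */
	ct->dynticks_nmi_nesting = 0;
	if (ct->dynticks_nesting != 1) {
		/* Nested call: RCU keeps watching, only do the accounting. */
		ct->dynticks_nesting--;
		return;
	}
	ct->dynticks_nesting = 0;
	ct->rcu_watching = 0;		/* real code: ct_kernel_exit_state() */
}

static void kernel_enter(struct ct_cpu *ct)
{
	if (ct->dynticks_nesting) {
		/* Already watching: just bump the nesting count. */
		ct->dynticks_nesting++;
		return;
	}
	ct->rcu_watching = 1;		/* real code: ct_kernel_enter_state() */
	ct->dynticks_nesting = 1;
	ct->dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE;
}

int main(void)
{
	struct ct_cpu ct = {
		.dynticks_nesting	= 1,
		.dynticks_nmi_nesting	= DYNTICK_IRQ_NONIDLE,
		.rcu_watching		= 1,
	};

	kernel_exit(&ct);		/* outermost exit: RCU stops watching */
	printf("watching=%d nesting=%ld\n", ct.rcu_watching, ct.dynticks_nesting);
	kernel_enter(&ct);		/* back in the kernel: watching again */
	printf("watching=%d nesting=%ld nmi=%ld\n",
	       ct.rcu_watching, ct.dynticks_nesting, ct.dynticks_nmi_nesting);
	return 0;
}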
194 * ct_nmi_exit - inform RCU of exit from NMI context
197 * RCU-idle period, update ct->state and ct->dynticks_nmi_nesting
198 * to let the RCU grace-period handling know that the CPU is back to
199 * being RCU-idle.
210 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. in ct_nmi_exit()
218 * If the nesting level is not 1, the CPU wasn't RCU-idle, so in ct_nmi_exit()
219 * leave it in non-RCU-idle state. in ct_nmi_exit()
222 trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2, in ct_nmi_exit()
224 WRITE_ONCE(ct->dynticks_nmi_nesting, /* No store tearing. */ in ct_nmi_exit()
225 ct_dynticks_nmi_nesting() - 2); in ct_nmi_exit()
230 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ in ct_nmi_exit()
232 WRITE_ONCE(ct->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ in ct_nmi_exit()
235 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_exit()
240 // ... but is no longer watching here. in ct_nmi_exit()
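
The arithmetic in these ct_nmi_exit() fragments is the counterpart of ct_nmi_enter() below: entry bumps ->dynticks_nmi_nesting by 1 when the NMI interrupted an RCU-idle CPU and by 2 otherwise, so on exit any value other than 1 means some outer context is still non-idle, while exactly 1 marks the outermost NMI that must restore RCU-idleness. A small sketch of just that decision (struct and names are illustrative):

#include <stdio.h>

struct ct_cpu {
	long dynticks_nmi_nesting;
	int  rcu_watching;		/* stand-in for the real EQS state */
};

static void nmi_exit(struct ct_cpu *ct)
{
	if (ct->dynticks_nmi_nesting != 1) {
		/* Not the outermost NMI over an idle CPU: stay non-idle and
		 * undo the "+= 2" that entry performed. */
		ct->dynticks_nmi_nesting -= 2;
		return;
	}
	/* Outermost NMI that interrupted an RCU-idle CPU: restore idleness. */
	ct->dynticks_nmi_nesting = 0;
	ct->rcu_watching = 0;		/* real code: ct_kernel_exit_state() */
}

int main(void)
{
	struct ct_cpu ct = { .dynticks_nmi_nesting = 1, .rcu_watching = 1 };

	nmi_exit(&ct);
	printf("watching=%d nesting=%ld\n", ct.rcu_watching, ct.dynticks_nmi_nesting);
	return 0;
}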
247 * ct_nmi_enter - inform RCU of entry to NMI context
249 * If the CPU was idle from RCU's viewpoint, update ct->state and
250 * ct->dynticks_nmi_nesting to let the RCU grace-period handling know
267 * If idle from RCU viewpoint, atomically increment ->dynticks in ct_nmi_enter()
268 * to mark non-idle and increment ->dynticks_nmi_nesting by one. in ct_nmi_enter()
269 * Otherwise, increment ->dynticks_nmi_nesting by two. This means in ct_nmi_enter()
270 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed in ct_nmi_enter()
271 * to be in the outermost NMI handler that interrupted an RCU-idle in ct_nmi_enter()
285 instrument_atomic_read(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
287 instrument_atomic_write(&ct->state, sizeof(ct->state)); in ct_nmi_enter()
301 WRITE_ONCE(ct->dynticks_nmi_nesting, /* Prevent store tearing. */ in ct_nmi_enter()
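
The comment above is the whole trick on the entry side: the increment is 1 only when the NMI found RCU idle and therefore had to start it watching, 2 otherwise, which is what makes a nesting count of exactly 1 on exit identify the outermost NMI over an idle CPU. Sketched with the same illustrative struct as the previous example:

#include <stdio.h>

struct ct_cpu {			/* same illustrative fields as above */
	long dynticks_nmi_nesting;
	int  rcu_watching;
};

static void nmi_enter(struct ct_cpu *ct)
{
	long incby = 2;

	if (!ct->rcu_watching) {
		/* NMI interrupted an RCU-idle CPU: start watching, and record
		 * that this level owes an EQS restore by bumping by only 1. */
		ct->rcu_watching = 1;	/* real code: ct_kernel_enter_state() */
		incby = 1;
	}
	ct->dynticks_nmi_nesting += incby;
}

int main(void)
{
	struct ct_cpu ct = { 0 };	/* CPU idle from RCU's viewpoint */

	nmi_enter(&ct);			/* outermost NMI: counter becomes 1 */
	nmi_enter(&ct);			/* nested NMI:    counter becomes 3 */
	printf("watching=%d nesting=%ld\n", ct.rcu_watching, ct.dynticks_nmi_nesting);
	return 0;
}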
307 * ct_idle_enter - inform RCU that current CPU is entering idle
309 * Enter idle mode, in other words, -leave- the mode in which RCU
310 * read-side critical sections can occur. (Though RCU read-side
311 * critical sections can occur in irq handlers in idle, a possibility
325 * ct_idle_exit - inform RCU that current CPU is leaving idle
327 * Exit idle mode, in other words, -enter- the mode in which RCU
328 * read-side critical sections can occur.
338 ct_kernel_enter(false, RCU_DYNTICKS_IDX - CONTEXT_IDLE); in ct_idle_exit()
344 * ct_irq_enter - inform RCU that current CPU is entering irq away from idle
347 * idle mode, in other words, entering the mode in which read-side critical
352 * This code assumes that the idle loop never does upcalls to user mode.
353 * If your architecture's idle loop does do upcalls to user mode (or does
372 * ct_irq_exit - inform RCU that current CPU is exiting irq towards idle
375 * idle mode, in other words, leaving the mode in which read-side critical
378 * This code assumes that the idle loop never does anything that might
380 * architecture's idle loop violates this assumption, RCU will give you what
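
Under the assumptions spelled out in these kernel-doc fragments (the idle loop makes no upcalls to user mode, and every interrupt taken while idle is bracketed by the irq hooks), the intended nesting of the four entry points looks like the following. The functions here are local stubs that only print; they merely stand in for the real kernel APIs of the same names:

#include <stdio.h>

/* Stubs standing in for the kernel entry points named above. */
static void ct_idle_enter(void) { puts("idle enter: RCU stops watching"); }
static void ct_idle_exit(void)  { puts("idle exit:  RCU watching again"); }
static void ct_irq_enter(void)  { puts(" irq enter: RCU temporarily watching"); }
static void ct_irq_exit(void)   { puts(" irq exit:  back to RCU-idle"); }

/* Shape of an architecture idle loop under the stated assumptions: no
 * upcalls to user mode inside the idle section, and every interrupt taken
 * while idle bracketed by ct_irq_enter()/ct_irq_exit(). */
static void idle_loop_iteration(int got_irq)
{
	ct_idle_enter();
	if (got_irq) {		/* an interrupt arrives while idle */
		ct_irq_enter();
		/* ... handler runs; RCU read-side sections are legal here ... */
		ct_irq_exit();
	}
	ct_idle_exit();
}

int main(void)
{
	idle_loop_iteration(1);
	return 0;
}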
458 * __ct_user_enter - Inform the context tracking that the CPU is going
472 WARN_ON_ONCE(!current->mm); in __ct_user_enter()
478 if (ct->active) { in __ct_user_enter()
482 * any RCU read-side critical section until the next call to in __ct_user_enter()
484 * on the tick. in __ct_user_enter()
500 * Enter RCU idle mode right before resuming userspace. No use of RCU in __ct_user_enter()
508 * Special case if we only track user <-> kernel transitions for tickless in __ct_user_enter()
513 raw_atomic_set(&ct->state, state); in __ct_user_enter()
516 * Even if context tracking is disabled on this CPU, because it's outside in __ct_user_enter()
518 * context transitions and states to prevent inconsistency on those of in __ct_user_enter()
520 * If a task triggers an exception in userspace, sleep on the exception in __ct_user_enter()
529 /* Tracking for vtime only, no concurrent RCU EQS accounting */ in __ct_user_enter()
530 raw_atomic_set(&ct->state, state); in __ct_user_enter()
538 raw_atomic_add(state, &ct->state); in __ct_user_enter()
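
What the __ct_user_enter() fragments boil down to: when this CPU participates in RCU's extended-quiescent-state accounting, ct->state is also updated concurrently by the EQS counter increments (the NMI path above, for instance), so the context change has to be folded in with an atomic add; when the CPU only tracks user/kernel transitions for vtime, a plain store of the new context is enough. A toy model of that distinction; the bit layout and CTX_* values are invented for the example and do not match the kernel's CONTEXT_* encoding:

#include <stdatomic.h>
#include <stdio.h>

/* Toy packed state word: low bits hold the context, everything above is an
 * EQS counter that other paths may bump concurrently. */
enum { CTX_KERNEL = 0, CTX_USER = 2, CTX_MASK = 3 };
#define EQS_INC 4			/* one EQS-counter step */

static atomic_uint ct_state;		/* starts as CTX_KERNEL, counter 0 */

/* With concurrent RCU EQS accounting, only an atomic add is safe: it moves
 * the context bits from KERNEL (0) to USER without clobbering whatever the
 * EQS counter happens to be (models raw_atomic_add(state, &ct->state)). */
static void user_enter_tracked(void)
{
	atomic_fetch_add(&ct_state, CTX_USER);
}

/* vtime-only tracking: nothing else touches the word, so a plain store is
 * enough (models raw_atomic_set(&ct->state, state)). */
static void user_enter_vtime_only(void)
{
	atomic_store(&ct_state, CTX_USER);
}

int main(void)
{
	atomic_fetch_add(&ct_state, EQS_INC);	/* pretend an NMI advanced the counter */
	user_enter_tracked();
	printf("ctx=%u eqs=%u\n",
	       atomic_load(&ct_state) & CTX_MASK, atomic_load(&ct_state) >> 2);

	atomic_store(&ct_state, CTX_KERNEL);	/* reset: a vtime-only CPU */
	user_enter_vtime_only();
	printf("ctx=%u\n", atomic_load(&ct_state) & CTX_MASK);
	return 0;
}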
578 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
595 * __ct_user_exit - Inform the context tracking that the CPU is
603 * This call supports re-entrancy. This way it can be called from any exception
614 if (ct->active) { in __ct_user_exit()
616 * Exit RCU idle mode while entering the kernel because it can in __ct_user_exit()
619 ct_kernel_enter(true, RCU_DYNTICKS_IDX - state); in __ct_user_exit()
628 * Special case if we only track user <-> kernel transitions for tickless in __ct_user_exit()
633 raw_atomic_set(&ct->state, CONTEXT_KERNEL); in __ct_user_exit()
637 /* Tracking for vtime only, no concurrent RCU EQS accounting */ in __ct_user_exit()
638 raw_atomic_set(&ct->state, CONTEXT_KERNEL); in __ct_user_exit()
646 raw_atomic_sub(state, &ct->state); in __ct_user_exit()
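
The exit path is the mirror image: ct_kernel_enter() takes RCU back out of the extended quiescent state when the CPU is actively tracked, and the state word returns to the kernel context either by subtracting the context value, so that concurrently updated counter bits survive, or by a plain store on vtime-only CPUs. Continuing the toy model from the __ct_user_enter() sketch (same invented layout):

#include <stdatomic.h>
#include <stdio.h>

/* Same toy layout as in the __ct_user_enter() sketch above. */
enum { CTX_KERNEL = 0, CTX_USER = 2, CTX_MASK = 3 };
static atomic_uint ct_state;

/* Tracked CPU: subtract the context value to get back to CTX_KERNEL (0)
 * without disturbing the counter bits packed above it (models
 * raw_atomic_sub(state, &ct->state)). */
static void user_exit_tracked(void)
{
	atomic_fetch_sub(&ct_state, CTX_USER);
}

/* vtime-only CPU: nothing else touches the word, a plain store suffices
 * (models raw_atomic_set(&ct->state, CONTEXT_KERNEL)). */
static void user_exit_vtime_only(void)
{
	atomic_store(&ct_state, CTX_KERNEL);
}

int main(void)
{
	atomic_store(&ct_state, CTX_USER + 4);	/* in userspace, counter at 1 */
	user_exit_tracked();
	printf("ctx=%u counter preserved=%u\n",
	       atomic_load(&ct_state) & CTX_MASK, atomic_load(&ct_state) >> 2);
	user_exit_vtime_only();
	return 0;
}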
678 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
708 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork in ct_cpu_track_user()
709 * This assumes that init is the only task at this early boot stage. in ct_cpu_track_user()
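
The boot-time trick described in this last fragment relies only on fork inheriting the parent's thread flags: set the flag on init before any other task exists and every later task is born with it. A minimal illustration of that inheritance pattern; TIF_NOHZ is a real kernel thread flag, but the struct, the bit position and the task_fork() helper here are invented for the example:

#include <stdio.h>

#define TIF_NOHZ	(1u << 0)	/* illustrative bit position */

struct task {
	unsigned int flags;
};

/* Models fork(): the child starts as a copy of the parent, flags included. */
static struct task task_fork(const struct task *parent)
{
	return *parent;
}

int main(void)
{
	struct task init_task = { .flags = 0 };

	/* Early boot: init is the only task, so setting the flag here means
	 * every descendant will inherit it through fork. */
	init_task.flags |= TIF_NOHZ;

	struct task child = task_fork(&init_task);
	struct task grandchild = task_fork(&child);

	printf("grandchild has TIF_NOHZ: %d\n", !!(grandchild.flags & TIF_NOHZ));
	return 0;
}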