xref: /openbmc/linux/kernel/rcu/tree.c (revision 1d7a0395)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

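/*
 * Editorial sketch (not in the original file): how the stolen bit
 * partitions the ->dynticks value.  Bit 0 (RCU_DYNTICK_CTRL_MASK) is a
 * request for special action (e.g., a TLB flush) on the next EQS exit,
 * and the remaining bits form a counter that advances in units of
 * RCU_DYNTICK_CTRL_CTR:
 *
 *	int val = atomic_read(&rdp->dynticks);
 *	bool special = val & RCU_DYNTICK_CTRL_MASK;	// bit 0: deferred work
 *	int ctr = val & ~RCU_DYNTICK_CTRL_MASK;		// bits 1 and up
 *	bool watching = ctr & RCU_DYNTICK_CTRL_CTR;	// low counter bit set
 *
 * Each EQS entry or exit adds RCU_DYNTICK_CTRL_CTR, so the low counter
 * bit toggles between "RCU is watching" (set) and "in an EQS" (clear).
 */
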
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
#ifdef CONFIG_RCU_NOCB_CPU
	.cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
#endif
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

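/*
 * Editorial note (not in the original file): the odd-looking ->gp_seq
 * initializer above, (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, starts the
 * grace-period sequence counter 300 grace periods short of wrapping, so
 * the counter-wrap code paths get exercised shortly after boot rather
 * than only after weeks of uptime.  Assuming RCU_SEQ_CTR_SHIFT == 2, on
 * a 64-bit system this initial value is 0xfffffffffffffb50.
 */
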
/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This RCU parameter is read-only at runtime, but can be set at boot
 * time.  It specifies the minimum number of objects that can be cached
 * per CPU; each object is one page in size.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

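/*
 * Editorial sketch (not in the original file), assuming the rcu_seq
 * encoding from kernel/rcu/rcu.h: the bottom RCU_SEQ_CTR_SHIFT bits of
 * ->gp_seq hold the grace-period phase and the upper bits count grace
 * periods, so rcu_gp_in_progress() amounts to a check of the low bits:
 *
 *	unsigned long s = rcu_seq_current(&rcu_state.gp_seq);
 *	bool gp_in_progress = (s & RCU_SEQ_STATE_MASK) != 0;
 *
 * Completing a grace period advances the upper counter and returns the
 * phase bits to zero.
 */
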
/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	int snap = atomic_add_return(0, &rdp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
bool rcu_is_idle_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}

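/*
 * Editorial sketch (not in the original file): how the snapshot helpers
 * above combine into the polling pattern used by the force-quiescent-state
 * code later in this file (dyntick_save_progress_counter() and
 * rcu_implicit_dynticks_qs()):
 *
 *	int snap = rcu_dynticks_snap(rdp);	// fully ordered snapshot
 *	if (rcu_dynticks_in_eqs(snap))
 *		return 1;	// CPU is idle right now: quiescent state.
 *	// ... wait a while, then re-check:
 *	if (rcu_dynticks_in_eqs_since(rdp, snap))
 *		return 1;	// CPU passed through idle in the meantime.
 *
 * Any change in the counter implies at least one EQS entry or exit, and
 * therefore a quiescent state, even if the CPU is busy again by now.
 */
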
/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
					       RCU_DYNTICK_CTRL_CTR);

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure.
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	int new_old;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	new_old = atomic_read(&rdp->dynticks);
	do {
		old = new_old;
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
	} while (new_old != old);
	return true;
}

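/*
 * Editorial sketch (not in the original file) of the intended use of
 * rcu_eqs_special_set() by code wanting remote work (such as a TLB
 * flush) done without waking an idle CPU.  The handler name below is
 * hypothetical:
 *
 *	if (!rcu_eqs_special_set(cpu))
 *		// CPU is not idle, so do the work the conventional way.
 *		smp_call_function_single(cpu, do_flush_ipi, NULL, 1);
 *	// Otherwise the CPU will see RCU_DYNTICK_CTRL_MASK set and can
 *	// carry out the deferred work in rcu_dynticks_eqs_exit().
 *
 * The cmpxchg loop above retries if ->dynticks changes between the read
 * and the update, and bails out if the CPU leaves its quiescent state.
 */
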
/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
	int special;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
				    &this_cpu_ptr(&rcu_data)->dynticks);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);

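/*
 * Editorial note (not in the original file): adding
 * 2 * RCU_DYNTICK_CTRL_CTR advances the ->dynticks counter by two full
 * steps, which leaves the "RCU is watching" bit unchanged but makes the
 * counter differ from any previously taken snapshot.  For example, with
 * RCU_DYNTICK_CTRL_CTR == 2, a ->dynticks value of 0x6 becomes 0xa, so
 * a grace-period kthread holding snapshot 0x6 sees
 * rcu_dynticks_in_eqs_since() return true, exactly as if this CPU had
 * passed through a real dyntick-idle period.
 */
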
/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods. The latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}

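/*
 * Editorial sketch (not in the original file): the two nesting counters
 * consulted above under a few representative conditions, where
 * DYNTICK_IRQ_NONIDLE is the large "crowbarred" value used while the
 * CPU is non-idle:
 *
 *	context				nesting	nmi_nesting
 *	task running (non-idle)		>= 1	DYNTICK_IRQ_NONIDLE
 *	idle loop, no interrupt		0	0
 *	first-level irq from idle	0	1
 *	nested irq/NMI from idle	0	>= 3
 *
 * So nmi_nesting <= 1 combined with nesting == 0 identifies code entered
 * directly from, or via a single interrupt from, the idle loop.
 */
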
#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
				// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large systems. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}

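/*
 * Editorial example (not in the original file), with assumed values
 * HZ == 1000, nr_cpu_ids == 64, RCU_JIFFIES_FQS_DIV == 256, and boot
 * parameters jiffies_till_first_fqs == 1 and jiffies_till_next_fqs == 1:
 * j = 1 + 2 * 1 = 3 jiffies, which is below the floor of
 * HZ / 10 + 64 / 256 = 100 + 0 = 100 jiffies, so jiffies_to_sched_qs
 * becomes 100, and scheduler help is recruited no sooner than 100
 * milliseconds into a grace period.
 */
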
static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_NO_HZ_FULL

#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
/*
 * An empty irq_work function.  Queueing it will trigger a reschedule
 * from the IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wakeups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr static void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}

#else
static inline void rcu_irq_work_resched(void) { }
#endif

/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Other than in the generic entry implementation, we may be past the
	 * last rescheduling opportunity in the entry code.  Trigger a self-IPI
	 * that will fire and reschedule once we resume in user/guest mode.
	 */
	rcu_irq_work_resched();
	rcu_eqs_enter(true);
}

#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (!in_nmi())
		rcu_prepare_for_idle();

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

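/*
 * Editorial trace (not in the original file) of the bookkeeping in
 * rcu_nmi_exit() above, starting from an idle CPU that took an
 * interrupt and then an NMI (so ->dynticks_nmi_nesting == 3):
 *
 *	rcu_nmi_exit();	// NMI return:  3 -> 1, CPU stays non-idle
 *	rcu_nmi_exit();	// irq return:  1 -> 0, rcu_dynticks_eqs_enter()
 *
 * Only the transition from exactly 1 re-enters the extended quiescent
 * state; all deeper nestings simply subtract 2.
 */
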
/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}

/**
 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
 *			  towards in-kernel preemption
 *
 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
 * from an RCU point of view. Invoked on return from interrupt before
 * kernel preemption.
 */
void rcu_irq_exit_preempt(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel, which can run an RCU
 * read-side critical section at any time.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(true);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (!in_nmi()) {
			instrumentation_begin();
			rcu_cleanup_after_idle();
			instrumentation_end();
		}

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}

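/*
 * Editorial trace (not in the original file) of the increments in
 * rcu_nmi_enter() above, starting from the idle loop
 * (->dynticks_nmi_nesting == 0, RCU not watching):
 *
 *	rcu_nmi_enter();	// irq from idle:  0 -> 1, EQS exit, incby = 1
 *	rcu_nmi_enter();	// nested NMI:     1 -> 3, incby = 2
 *	rcu_nmi_enter();	// deeper nesting: 3 -> 5, incby = 2
 *
 * A value of exactly 1 therefore identifies the outermost handler that
 * interrupted an RCU-idle period, per the Lutomirski observation above.
 */
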
/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 *
 * Make this function notrace because it can be called by ftrace internals,
 * and marking it notrace avoids unnecessary recursion.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}

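/*
 * Editorial example (not in the original file): ULONG_CMP_LT() is a
 * modular (wrap-tolerant) comparison, so the first check above fires
 * only when rdp->gp_seq trails rnp->gp_seq by more than ULONG_MAX / 4.
 * On a 64-bit system with rdp->gp_seq == 100, the check
 *
 *	ULONG_CMP_LT(100 + ULONG_MAX / 4, rnp->gp_seq)
 *
 * stays false during normal progress, but becomes true once the
 * rcu_node counter has lapped the long-idle CPU by over a quarter of
 * the counter space, at which point ->gpwrap tells later code to
 * distrust rdp's stale ->gp_seq value.
 */
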
/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks-idle
 * state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state.  After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline.  This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
		bool onl;
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(*ruqp, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*ruqp, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}

/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP kthread needs to be awakened, else false.
1415  */
1416 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1417 			      unsigned long gp_seq_req)
1418 {
1419 	bool ret = false;
1420 	struct rcu_node *rnp;
1421 
1422 	/*
1423 	 * Use funnel locking to either acquire the root rcu_node
1424 	 * structure's lock or bail out if the need for this grace period
1425 	 * has already been recorded -- or if that grace period has in
1426 	 * fact already started.  If there is already a grace period in
1427 	 * progress in a non-leaf node, no recording is needed because the
1428 	 * end of the grace period will scan the leaf rcu_node structures.
1429 	 * Note that rnp_start->lock must not be released.
1430 	 */
1431 	raw_lockdep_assert_held_rcu_node(rnp_start);
1432 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1433 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
1434 		if (rnp != rnp_start)
1435 			raw_spin_lock_rcu_node(rnp);
1436 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1437 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1438 		    (rnp != rnp_start &&
1439 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1440 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1441 					  TPS("Prestarted"));
1442 			goto unlock_out;
1443 		}
1444 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1445 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1446 			/*
1447 			 * We just marked the leaf or internal node, and a
1448 			 * grace period is in progress, which means that
1449 			 * rcu_gp_cleanup() will see the marking.  Bail to
1450 			 * reduce contention.
1451 			 */
1452 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1453 					  TPS("Startedleaf"));
1454 			goto unlock_out;
1455 		}
1456 		if (rnp != rnp_start && rnp->parent != NULL)
1457 			raw_spin_unlock_rcu_node(rnp);
1458 		if (!rnp->parent)
1459 			break;  /* At root, and perhaps also leaf. */
1460 	}
1461 
1462 	/* If GP already in progress, just leave, otherwise start one. */
1463 	if (rcu_gp_in_progress()) {
1464 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1465 		goto unlock_out;
1466 	}
1467 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1468 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1469 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1470 	if (!READ_ONCE(rcu_state.gp_kthread)) {
1471 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1472 		goto unlock_out;
1473 	}
1474 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1475 	ret = true;  /* Caller must wake GP kthread. */
1476 unlock_out:
1477 	/* Push furthest requested GP to leaf node and rcu_data structure. */
1478 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1479 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1480 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1481 	}
1482 	if (rnp != rnp_start)
1483 		raw_spin_unlock_rcu_node(rnp);
1484 	return ret;
1485 }
1486 
1487 /*
1488  * Clean up any old requests for the just-ended grace period.  Also return
1489  * whether any additional grace periods have been requested.
1490  */
1491 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1492 {
1493 	bool needmore;
1494 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1495 
1496 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1497 	if (!needmore)
1498 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1499 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1500 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1501 	return needmore;
1502 }
1503 
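/*
 * Worked example of the ->gp_seq arithmetic used throughout (a sketch
 * assuming the usual two low-order state bits, RCU_SEQ_CTR_SHIFT == 2):
 *
 *	gp_seq == 0x104: counter 0x41, state 0, no grace period in progress
 *	rcu_seq_start(): 0x105, state 1, grace period in progress
 *	rcu_seq_end():   0x108, counter 0x42, idle again
 *
 * rcu_seq_snap() on the idle value 0x104 returns 0x108 (wait for one
 * full grace period), but on the in-progress value 0x105 returns 0x10c,
 * skipping the current grace period because it might have started
 * before the caller's accesses.  The ULONG_CMP_LT()/ULONG_CMP_GE()
 * macros compare such values safely across counter wrap.
 */
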
1504 /*
1505  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1506  * interrupt or softirq handler, in which case we just might immediately
1507  * sleep upon return, resulting in a grace-period hang), and don't bother
1508  * awakening when there is nothing for the grace-period kthread to do
1509  * (as in several CPUs raced to awaken, we lost), and finally don't try
1510  * to awaken a kthread that has not yet been created.  If all those checks
1511  * are passed, track some debug information and awaken.
1512  *
1513  * So why do the self-wakeup when in an interrupt or softirq handler
1514  * in the grace-period kthread's context?  Because the kthread might have
1515  * been interrupted just as it was going to sleep, and just after the final
1516  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1517  * is required, and is therefore supplied.
1518  */
1519 static void rcu_gp_kthread_wake(void)
1520 {
1521 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1522 
1523 	if ((current == t && !in_irq() && !in_serving_softirq()) ||
1524 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1525 		return;
1526 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1527 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1528 	swake_up_one(&rcu_state.gp_wq);
1529 }
1530 
1531 /*
1532  * If there is room, assign a ->gp_seq number to any callbacks on this
1533  * CPU that have not already been assigned.  Also accelerate any callbacks
1534  * that were previously assigned a ->gp_seq number that has since proven
1535  * to be too conservative, which can happen if callbacks get assigned a
1536  * ->gp_seq number while RCU is idle, but with reference to a non-root
1537  * rcu_node structure.  This function is idempotent, so it does not hurt
1538  * to call it repeatedly.  Returns an flag saying that we should awaken
1539  * the RCU grace-period kthread.
1540  *
1541  * The caller must hold rnp->lock with interrupts disabled.
1542  */
1543 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1544 {
1545 	unsigned long gp_seq_req;
1546 	bool ret = false;
1547 
1548 	rcu_lockdep_assert_cblist_protected(rdp);
1549 	raw_lockdep_assert_held_rcu_node(rnp);
1550 
1551 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1552 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1553 		return false;
1554 
1555 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1556 
1557 	/*
1558 	 * Callbacks are often registered with incomplete grace-period
1559 	 * information.  Something about the fact that getting exact
1560 	 * information requires acquiring a global lock...  RCU therefore
1561 	 * makes a conservative estimate of the grace period number at which
1562 	 * a given callback will become ready to invoke.	The following
1563 	 * a given callback will become ready to invoke.  The following
1564 	 * accelerating callback invocation to an earlier grace-period
1565 	 * number.
1566 	 */
1567 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1568 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1569 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1570 
1571 	/* Trace depending on how much we were able to accelerate. */
1572 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1573 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1574 	else
1575 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1576 
1577 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1578 
1579 	return ret;
1580 }
1581 
1582 /*
1583  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1584  * rcu_node structure's ->lock be held.  It consults the cached value
1585  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1586  * that a new grace-period request should be made, invokes rcu_accelerate_cbs()
1587  * while holding the leaf rcu_node structure's ->lock.
1588  */
1589 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1590 					struct rcu_data *rdp)
1591 {
1592 	unsigned long c;
1593 	bool needwake;
1594 
1595 	rcu_lockdep_assert_cblist_protected(rdp);
1596 	c = rcu_seq_snap(&rcu_state.gp_seq);
1597 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1598 		/* Old request still live, so mark recent callbacks. */
1599 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1600 		return;
1601 	}
1602 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1603 	needwake = rcu_accelerate_cbs(rnp, rdp);
1604 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1605 	if (needwake)
1606 		rcu_gp_kthread_wake();
1607 }
1608 
1609 /*
1610  * Move any callbacks whose grace period has completed to the
1611  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1612  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1613  * sublist.  This function is idempotent, so it does not hurt to
1614  * invoke it repeatedly.  As long as it is not invoked -too- often...
1615  * Returns true if the RCU grace-period kthread needs to be awakened.
1616  *
1617  * The caller must hold rnp->lock with interrupts disabled.
1618  */
1619 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1620 {
1621 	rcu_lockdep_assert_cblist_protected(rdp);
1622 	raw_lockdep_assert_held_rcu_node(rnp);
1623 
1624 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1625 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1626 		return false;
1627 
1628 	/*
1629 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1630 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1631 	 */
1632 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1633 
1634 	/* Classify any remaining callbacks. */
1635 	return rcu_accelerate_cbs(rnp, rdp);
1636 }
1637 
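/*
 * Conceptual sketch of the segmented callback list operated on above
 * (the segment names are real; the gp_seq values are invented for
 * illustration only):
 *
 *	RCU_DONE_TAIL:       ready to invoke now
 *	RCU_WAIT_TAIL:       waiting for gp_seq 0x108 to complete
 *	RCU_NEXT_READY_TAIL: waiting for gp_seq 0x10c
 *	RCU_NEXT_TAIL:       not yet assigned a gp_seq
 *
 * rcu_segcblist_advance() moves callbacks whose grace period has
 * completed into RCU_DONE_TAIL, and rcu_accelerate_cbs() assigns the
 * earliest safe gp_seq to the remainder.
 */
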
1638 /*
1639  * Move and classify callbacks, but only if doing so won't require
1640  * that the RCU grace-period kthread be awakened.
1641  */
1642 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1643 						  struct rcu_data *rdp)
1644 {
1645 	rcu_lockdep_assert_cblist_protected(rdp);
1646 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1647 	    !raw_spin_trylock_rcu_node(rnp))
1648 		return;
1649 	WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1650 	raw_spin_unlock_rcu_node(rnp);
1651 }
1652 
1653 /*
1654  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1655  * quiescent state.  This is intended to be invoked when the CPU notices
1656  * a new grace period.
1657  */
1658 static void rcu_strict_gp_check_qs(void)
1659 {
1660 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1661 		rcu_read_lock();
1662 		rcu_read_unlock();
1663 	}
1664 }
1665 
1666 /*
1667  * Update CPU-local rcu_data state to record the beginnings and ends of
1668  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1669  * structure corresponding to the current CPU, and must have irqs disabled.
1670  * Returns true if the grace-period kthread needs to be awakened.
1671  */
1672 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1673 {
1674 	bool ret = false;
1675 	bool need_qs;
1676 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
1677 
1678 	raw_lockdep_assert_held_rcu_node(rnp);
1679 
1680 	if (rdp->gp_seq == rnp->gp_seq)
1681 		return false; /* Nothing to do. */
1682 
1683 	/* Handle the ends of any preceding grace periods first. */
1684 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1685 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1686 		if (!offloaded)
1687 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1688 		rdp->core_needs_qs = false;
1689 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1690 	} else {
1691 		if (!offloaded)
1692 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1693 		if (rdp->core_needs_qs)
1694 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1695 	}
1696 
1697 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1698 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1699 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1700 		/*
1701 		 * If the current grace period is waiting for this CPU,
1702 		 * set up to detect a quiescent state, otherwise don't
1703 		 * go looking for one.
1704 		 */
1705 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1706 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1707 		rdp->cpu_no_qs.b.norm = need_qs;
1708 		rdp->core_needs_qs = need_qs;
1709 		zero_cpu_stall_ticks(rdp);
1710 	}
1711 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1712 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1713 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1714 	WRITE_ONCE(rdp->gpwrap, false);
1715 	rcu_gpnum_ovf(rnp, rdp);
1716 	return ret;
1717 }
1718 
1719 static void note_gp_changes(struct rcu_data *rdp)
1720 {
1721 	unsigned long flags;
1722 	bool needwake;
1723 	struct rcu_node *rnp;
1724 
1725 	local_irq_save(flags);
1726 	rnp = rdp->mynode;
1727 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1728 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1729 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1730 		local_irq_restore(flags);
1731 		return;
1732 	}
1733 	needwake = __note_gp_changes(rnp, rdp);
1734 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1735 	rcu_strict_gp_check_qs();
1736 	if (needwake)
1737 		rcu_gp_kthread_wake();
1738 }
1739 
1740 static void rcu_gp_slow(int delay)
1741 {
1742 	if (delay > 0 &&
1743 	    !(rcu_seq_ctr(rcu_state.gp_seq) %
1744 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1745 		schedule_timeout_idle(delay);
1746 }
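
/*
 * Example of the arithmetic in rcu_gp_slow(), with all values chosen
 * purely for illustration: given 2 rcu_node structures, a
 * PER_RCU_NODE_PERIOD of 3, and a delay of 5, one grace period out of
 * every 2 * 3 * 5 == 30 is stretched by a 5-jiffy sleep.
 */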
1747 
1748 static unsigned long sleep_duration;
1749 
1750 /* Allow rcutorture to stall the grace-period kthread. */
1751 void rcu_gp_set_torture_wait(int duration)
1752 {
1753 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1754 		WRITE_ONCE(sleep_duration, duration);
1755 }
1756 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1757 
1758 /* Actually implement the aforementioned wait. */
1759 static void rcu_gp_torture_wait(void)
1760 {
1761 	unsigned long duration;
1762 
1763 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1764 		return;
1765 	duration = xchg(&sleep_duration, 0UL);
1766 	if (duration > 0) {
1767 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1768 		schedule_timeout_idle(duration);
1769 		pr_alert("%s: Wait complete\n", __func__);
1770 	}
1771 }
1772 
1773 /*
1774  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1775  * processing.
1776  */
1777 static void rcu_strict_gp_boundary(void *unused)
1778 {
1779 	invoke_rcu_core();
1780 }
1781 
1782 /*
1783  * Initialize a new grace period.  Return false if no grace period required.
1784  */
1785 static bool rcu_gp_init(void)
1786 {
1787 	unsigned long firstseq;
1788 	unsigned long flags;
1789 	unsigned long oldmask;
1790 	unsigned long mask;
1791 	struct rcu_data *rdp;
1792 	struct rcu_node *rnp = rcu_get_root();
1793 
1794 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1795 	raw_spin_lock_irq_rcu_node(rnp);
1796 	if (!READ_ONCE(rcu_state.gp_flags)) {
1797 		/* Spurious wakeup, tell caller to go back to sleep.  */
1798 		raw_spin_unlock_irq_rcu_node(rnp);
1799 		return false;
1800 	}
1801 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1802 
1803 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1804 		/*
1805 		 * Grace period already in progress, don't start another.
1806 		 * Not supposed to be able to happen.
1807 		 */
1808 		raw_spin_unlock_irq_rcu_node(rnp);
1809 		return false;
1810 	}
1811 
1812 	/* Advance to a new grace period and initialize state. */
1813 	record_gp_stall_check_time();
1814 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1815 	rcu_seq_start(&rcu_state.gp_seq);
1816 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1817 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1818 	raw_spin_unlock_irq_rcu_node(rnp);
1819 
1820 	/*
1821 	 * Apply per-leaf buffered online and offline operations to
1822 	 * the rcu_node tree. Note that this new grace period need not
1823 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1824 	 * offlining path, when combined with checks in this function,
1825 	 * will handle CPUs that are currently going offline or that will
1826 	 * go offline later.  Please also refer to "Hotplug CPU" section
1827 	 * of RCU's Requirements documentation.
1828 	 */
1829 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1830 	rcu_for_each_leaf_node(rnp) {
1831 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1832 		firstseq = READ_ONCE(rnp->ofl_seq);
1833 		if (firstseq & 0x1)
1834 			while (firstseq == READ_ONCE(rnp->ofl_seq))
1835 				schedule_timeout_idle(1);  // Can't wake unless RCU is watching.
1836 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1837 		raw_spin_lock(&rcu_state.ofl_lock);
1838 		raw_spin_lock_irq_rcu_node(rnp);
1839 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1840 		    !rnp->wait_blkd_tasks) {
1841 			/* Nothing to do on this leaf rcu_node structure. */
1842 			raw_spin_unlock_irq_rcu_node(rnp);
1843 			raw_spin_unlock(&rcu_state.ofl_lock);
1844 			continue;
1845 		}
1846 
1847 		/* Record old state, apply changes to ->qsmaskinit field. */
1848 		oldmask = rnp->qsmaskinit;
1849 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1850 
1851 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1852 		if (!oldmask != !rnp->qsmaskinit) {
1853 			if (!oldmask) { /* First online CPU for rcu_node. */
1854 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1855 					rcu_init_new_rnp(rnp);
1856 			} else if (rcu_preempt_has_tasks(rnp)) {
1857 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1858 			} else { /* Last offline CPU and can propagate. */
1859 				rcu_cleanup_dead_rnp(rnp);
1860 			}
1861 		}
1862 
1863 		/*
1864 		 * If all waited-on tasks from prior grace period are
1865 		 * done, and if all this rcu_node structure's CPUs are
1866 		 * still offline, propagate up the rcu_node tree and
1867 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1868 		 * rcu_node structure's CPUs has since come back online,
1869 		 * simply clear ->wait_blkd_tasks.
1870 		 */
1871 		if (rnp->wait_blkd_tasks &&
1872 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1873 			rnp->wait_blkd_tasks = false;
1874 			if (!rnp->qsmaskinit)
1875 				rcu_cleanup_dead_rnp(rnp);
1876 		}
1877 
1878 		raw_spin_unlock_irq_rcu_node(rnp);
1879 		raw_spin_unlock(&rcu_state.ofl_lock);
1880 	}
1881 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1882 
1883 	/*
1884 	 * Set the quiescent-state-needed bits in all the rcu_node
1885 	 * structures for all currently online CPUs in breadth-first
1886 	 * order, starting from the root rcu_node structure, relying on the
1887 	 * layout of the tree within the rcu_state.node[] array.  Note that
1888 	 * other CPUs will access only the leaves of the hierarchy, thus
1889 	 * seeing that no grace period is in progress, at least until the
1890 	 * corresponding leaf node has been initialized.
1891 	 *
1892 	 * The grace period cannot complete until the initialization
1893 	 * process finishes, because this kthread handles both.
1894 	 */
1895 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1896 	rcu_for_each_node_breadth_first(rnp) {
1897 		rcu_gp_slow(gp_init_delay);
1898 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1899 		rdp = this_cpu_ptr(&rcu_data);
1900 		rcu_preempt_check_blocked_tasks(rnp);
1901 		rnp->qsmask = rnp->qsmaskinit;
1902 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1903 		if (rnp == rdp->mynode)
1904 			(void)__note_gp_changes(rnp, rdp);
1905 		rcu_preempt_boost_start_gp(rnp);
1906 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1907 					    rnp->level, rnp->grplo,
1908 					    rnp->grphi, rnp->qsmask);
1909 		/* Quiescent states for tasks on any now-offline CPUs. */
1910 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1911 		rnp->rcu_gp_init_mask = mask;
1912 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1913 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1914 		else
1915 			raw_spin_unlock_irq_rcu_node(rnp);
1916 		cond_resched_tasks_rcu_qs();
1917 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1918 	}
1919 
1920 	// If strict, make all CPUs aware of new grace period.
1921 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1922 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1923 
1924 	return true;
1925 }
1926 
1927 /*
1928  * Helper function for swait_event_idle_timeout_exclusive() wakeup at
1929  * force-quiescent-state time.
1930  */
1931 static bool rcu_gp_fqs_check_wake(int *gfp)
1932 {
1933 	struct rcu_node *rnp = rcu_get_root();
1934 
1935 	// If under overload conditions, force an immediate FQS scan.
1936 	if (*gfp & RCU_GP_FLAG_OVLD)
1937 		return true;
1938 
1939 	// Someone like call_rcu() requested a force-quiescent-state scan.
1940 	*gfp = READ_ONCE(rcu_state.gp_flags);
1941 	if (*gfp & RCU_GP_FLAG_FQS)
1942 		return true;
1943 
1944 	// The current grace period has completed.
1945 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1946 		return true;
1947 
1948 	return false;
1949 }
1950 
1951 /*
1952  * Do one round of quiescent-state forcing.
1953  */
1954 static void rcu_gp_fqs(bool first_time)
1955 {
1956 	struct rcu_node *rnp = rcu_get_root();
1957 
1958 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1959 	rcu_state.n_force_qs++;
1960 	if (first_time) {
1961 		/* Collect dyntick-idle snapshots. */
1962 		force_qs_rnp(dyntick_save_progress_counter);
1963 	} else {
1964 		/* Handle dyntick-idle and offline CPUs. */
1965 		force_qs_rnp(rcu_implicit_dynticks_qs);
1966 	}
1967 	/* Clear flag to prevent immediate re-entry. */
1968 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1969 		raw_spin_lock_irq_rcu_node(rnp);
1970 		WRITE_ONCE(rcu_state.gp_flags,
1971 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1972 		raw_spin_unlock_irq_rcu_node(rnp);
1973 	}
1974 }
1975 
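/*
 * The two force_qs_rnp() passes above form a snapshot/recheck protocol.
 * A rough editorial sketch of the idea (the real logic lives in
 * dyntick_save_progress_counter() and rcu_implicit_dynticks_qs(), which
 * also handle offline CPUs and urgency hints):
 *
 *	snap = dynticks counter of the target CPU;       // first FQS pass
 *	...
 *	if (counter changed or CPU is in an extended quiescent state)
 *		report a quiescent state for that CPU;   // later FQS passes
 */
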
1976 /*
1977  * Loop doing repeated quiescent-state forcing until the grace period ends.
1978  */
1979 static void rcu_gp_fqs_loop(void)
1980 {
1981 	bool first_gp_fqs;
1982 	int gf = 0;
1983 	unsigned long j;
1984 	int ret;
1985 	struct rcu_node *rnp = rcu_get_root();
1986 
1987 	first_gp_fqs = true;
1988 	j = READ_ONCE(jiffies_till_first_fqs);
1989 	if (rcu_state.cbovld)
1990 		gf = RCU_GP_FLAG_OVLD;
1991 	ret = 0;
1992 	for (;;) {
1993 		if (!ret) {
1994 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1995 			/*
1996 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1997 			 * update; required for stall checks.
1998 			 */
1999 			smp_wmb();
2000 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
2001 				   jiffies + (j ? 3 * j : 2));
2002 		}
2003 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2004 				       TPS("fqswait"));
2005 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2006 		ret = swait_event_idle_timeout_exclusive(
2007 				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
2008 		rcu_gp_torture_wait();
2009 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2010 		/* Locking provides needed memory barriers. */
2011 		/* If grace period done, leave loop. */
2012 		if (!READ_ONCE(rnp->qsmask) &&
2013 		    !rcu_preempt_blocked_readers_cgp(rnp))
2014 			break;
2015 		/* If time for quiescent-state forcing, do it. */
2016 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2017 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2018 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2019 					       TPS("fqsstart"));
2020 			rcu_gp_fqs(first_gp_fqs);
2021 			gf = 0;
2022 			if (first_gp_fqs) {
2023 				first_gp_fqs = false;
2024 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2025 			}
2026 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2027 					       TPS("fqsend"));
2028 			cond_resched_tasks_rcu_qs();
2029 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2030 			ret = 0; /* Force full wait till next FQS. */
2031 			j = READ_ONCE(jiffies_till_next_fqs);
2032 		} else {
2033 			/* Deal with stray signal. */
2034 			cond_resched_tasks_rcu_qs();
2035 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2036 			WARN_ON(signal_pending(current));
2037 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2038 					       TPS("fqswaitsig"));
2039 			ret = 1; /* Keep old FQS timing. */
2040 			j = jiffies;
2041 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
2042 				j = 1;
2043 			else
2044 				j = rcu_state.jiffies_force_qs - j;
2045 			gf = 0;
2046 		}
2047 	}
2048 }
2049 
2050 /*
2051  * Clean up after the old grace period.
2052  */
2053 static void rcu_gp_cleanup(void)
2054 {
2055 	int cpu;
2056 	bool needgp = false;
2057 	unsigned long gp_duration;
2058 	unsigned long new_gp_seq;
2059 	bool offloaded;
2060 	struct rcu_data *rdp;
2061 	struct rcu_node *rnp = rcu_get_root();
2062 	struct swait_queue_head *sq;
2063 
2064 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
2065 	raw_spin_lock_irq_rcu_node(rnp);
2066 	rcu_state.gp_end = jiffies;
2067 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2068 	if (gp_duration > rcu_state.gp_max)
2069 		rcu_state.gp_max = gp_duration;
2070 
2071 	/*
2072 	 * We know the grace period is complete, but to everyone else
2073 	 * it appears to still be ongoing.  But it is also the case
2074 	 * that to everyone else it looks like there is nothing that
2075 	 * they can do to advance the grace period.  It is therefore
2076 	 * safe for us to drop the lock in order to mark the grace
2077 	 * period as completed in all of the rcu_node structures.
2078 	 */
2079 	raw_spin_unlock_irq_rcu_node(rnp);
2080 
2081 	/*
2082 	 * Propagate new ->gp_seq value to rcu_node structures so that
2083 	 * other CPUs don't have to wait until the start of the next grace
2084 	 * period to process their callbacks.  This also avoids some nasty
2085 	 * RCU grace-period initialization races by forcing the end of
2086 	 * the current grace period to be completely recorded in all of
2087 	 * the rcu_node structures before the beginning of the next grace
2088 	 * period is recorded in any of the rcu_node structures.
2089 	 */
2090 	new_gp_seq = rcu_state.gp_seq;
2091 	rcu_seq_end(&new_gp_seq);
2092 	rcu_for_each_node_breadth_first(rnp) {
2093 		raw_spin_lock_irq_rcu_node(rnp);
2094 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2095 			dump_blkd_tasks(rnp, 10);
2096 		WARN_ON_ONCE(rnp->qsmask);
2097 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2098 		rdp = this_cpu_ptr(&rcu_data);
2099 		if (rnp == rdp->mynode)
2100 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2101 		/* smp_mb() provided by prior unlock-lock pair. */
2102 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2103 		// Reset overload indication for CPUs no longer overloaded
2104 		if (rcu_is_leaf_node(rnp))
2105 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2106 				rdp = per_cpu_ptr(&rcu_data, cpu);
2107 				check_cb_ovld_locked(rdp, rnp);
2108 			}
2109 		sq = rcu_nocb_gp_get(rnp);
2110 		raw_spin_unlock_irq_rcu_node(rnp);
2111 		rcu_nocb_gp_cleanup(sq);
2112 		cond_resched_tasks_rcu_qs();
2113 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2114 		rcu_gp_slow(gp_cleanup_delay);
2115 	}
2116 	rnp = rcu_get_root();
2117 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2118 
2119 	/* Declare grace period done, trace first to use old GP number. */
2120 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2121 	rcu_seq_end(&rcu_state.gp_seq);
2122 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2123 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2124 	/* Check for GP requests since above loop. */
2125 	rdp = this_cpu_ptr(&rcu_data);
2126 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2127 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2128 				  TPS("CleanupMore"));
2129 		needgp = true;
2130 	}
2131 	/* Advance CBs to reduce false positives below. */
2132 	offloaded = rcu_rdp_is_offloaded(rdp);
2133 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2134 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2135 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2136 		trace_rcu_grace_period(rcu_state.name,
2137 				       rcu_state.gp_seq,
2138 				       TPS("newreq"));
2139 	} else {
2140 		WRITE_ONCE(rcu_state.gp_flags,
2141 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2142 	}
2143 	raw_spin_unlock_irq_rcu_node(rnp);
2144 
2145 	// If strict, make all CPUs aware of the end of the old grace period.
2146 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2147 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2148 }
2149 
2150 /*
2151  * Body of kthread that handles grace periods.
2152  */
2153 static int __noreturn rcu_gp_kthread(void *unused)
2154 {
2155 	rcu_bind_gp_kthread();
2156 	for (;;) {
2157 
2158 		/* Handle grace-period start. */
2159 		for (;;) {
2160 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2161 					       TPS("reqwait"));
2162 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2163 			swait_event_idle_exclusive(rcu_state.gp_wq,
2164 					 READ_ONCE(rcu_state.gp_flags) &
2165 					 RCU_GP_FLAG_INIT);
2166 			rcu_gp_torture_wait();
2167 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2168 			/* Locking provides needed memory barrier. */
2169 			if (rcu_gp_init())
2170 				break;
2171 			cond_resched_tasks_rcu_qs();
2172 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2173 			WARN_ON(signal_pending(current));
2174 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2175 					       TPS("reqwaitsig"));
2176 		}
2177 
2178 		/* Handle quiescent-state forcing. */
2179 		rcu_gp_fqs_loop();
2180 
2181 		/* Handle grace-period end. */
2182 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2183 		rcu_gp_cleanup();
2184 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2185 	}
2186 }
2187 
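/*
 * Summary of the ->gp_state transitions driven by the kthread above and
 * its helpers, in the order a typical grace period moves through them:
 *
 *	RCU_GP_WAIT_GPS -> RCU_GP_DONE_GPS    (awaiting a GP request)
 *	RCU_GP_ONOFF -> RCU_GP_INIT           (rcu_gp_init())
 *	RCU_GP_WAIT_FQS -> RCU_GP_DOING_FQS   (rcu_gp_fqs_loop(), repeated)
 *	RCU_GP_CLEANUP -> RCU_GP_CLEANED      (rcu_gp_cleanup())
 *	RCU_GP_IDLE                           (end of cleanup)
 */
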
2188 /*
2189  * Report a full set of quiescent states to the rcu_state data structure.
2190  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2191  * another grace period is required.  Whether we wake the grace-period
2192  * kthread or it awakens itself for the next round of quiescent-state
2193  * forcing, that kthread will clean up after the just-completed grace
2194  * period.  Note that the caller must hold rnp->lock, which is released
2195  * before return.
2196  */
2197 static void rcu_report_qs_rsp(unsigned long flags)
2198 	__releases(rcu_get_root()->lock)
2199 {
2200 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2201 	WARN_ON_ONCE(!rcu_gp_in_progress());
2202 	WRITE_ONCE(rcu_state.gp_flags,
2203 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2204 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2205 	rcu_gp_kthread_wake();
2206 }
2207 
2208 /*
2209  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2210  * Allows quiescent states for a group of CPUs to be reported at one go
2211  * to the specified rcu_node structure, though all the CPUs in the group
2212  * must be represented by the same rcu_node structure (which need not be a
2213  * leaf rcu_node structure, though it often will be).  The gps parameter
2214  * is the grace-period snapshot, which means that the quiescent states
2215  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2216  * must be held upon entry, and it is released before return.
2217  *
2218  * As a special case, if mask is zero, the bit-already-cleared check is
2219  * disabled.  This allows propagating quiescent state due to resumed tasks
2220  * during grace-period initialization.
2221  */
2222 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2223 			      unsigned long gps, unsigned long flags)
2224 	__releases(rnp->lock)
2225 {
2226 	unsigned long oldmask = 0;
2227 	struct rcu_node *rnp_c;
2228 
2229 	raw_lockdep_assert_held_rcu_node(rnp);
2230 
2231 	/* Walk up the rcu_node hierarchy. */
2232 	for (;;) {
2233 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2234 
2235 			/*
2236 			 * Our bit has already been cleared, or the
2237 			 * relevant grace period is already over, so done.
2238 			 */
2239 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2240 			return;
2241 		}
2242 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2243 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2244 			     rcu_preempt_blocked_readers_cgp(rnp));
2245 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2246 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2247 						 mask, rnp->qsmask, rnp->level,
2248 						 rnp->grplo, rnp->grphi,
2249 						 !!rnp->gp_tasks);
2250 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2251 
2252 			/* Other bits still set at this level, so done. */
2253 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2254 			return;
2255 		}
2256 		rnp->completedqs = rnp->gp_seq;
2257 		mask = rnp->grpmask;
2258 		if (rnp->parent == NULL) {
2259 
2260 			/* No more levels.  Exit loop holding root lock. */
2261 
2262 			break;
2263 		}
2264 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2265 		rnp_c = rnp;
2266 		rnp = rnp->parent;
2267 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2268 		oldmask = READ_ONCE(rnp_c->qsmask);
2269 	}
2270 
2271 	/*
2272 	 * Get here if we are the last CPU to pass through a quiescent
2273 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2274 	 * to clean up and start the next grace period if one is needed.
2275 	 */
2276 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2277 }
2278 
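/*
 * Worked example of the walk above on a two-level tree, with bit values
 * invented for illustration: a leaf with qsmask 0x5 receiving a report
 * for mask 0x4 is left with qsmask 0x1, so the walk stops at the leaf.
 * A later report of mask 0x1 zeroes the leaf's qsmask, so mask is
 * replaced by the leaf's ->grpmask and the loop moves to the parent,
 * ultimately invoking rcu_report_qs_rsp() once the root's qsmask and
 * blocked-reader indications are clear.
 */
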
2279 /*
2280  * Record a quiescent state for all tasks that were previously queued
2281  * on the specified rcu_node structure and that were blocking the current
2282  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2283  * irqs disabled, and this lock is released upon return, but irqs remain
2284  * disabled.
2285  */
2286 static void __maybe_unused
2287 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2288 	__releases(rnp->lock)
2289 {
2290 	unsigned long gps;
2291 	unsigned long mask;
2292 	struct rcu_node *rnp_p;
2293 
2294 	raw_lockdep_assert_held_rcu_node(rnp);
2295 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2296 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2297 	    rnp->qsmask != 0) {
2298 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2299 		return;  /* Still need more quiescent states! */
2300 	}
2301 
2302 	rnp->completedqs = rnp->gp_seq;
2303 	rnp_p = rnp->parent;
2304 	if (rnp_p == NULL) {
2305 		/*
2306 		 * Only one rcu_node structure in the tree, so don't
2307 		 * try to report up to its nonexistent parent!
2308 		 */
2309 		rcu_report_qs_rsp(flags);
2310 		return;
2311 	}
2312 
2313 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2314 	gps = rnp->gp_seq;
2315 	mask = rnp->grpmask;
2316 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2317 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2318 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2319 }
2320 
2321 /*
2322  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2323  * structure.  This must be called from the specified CPU.
2324  */
2325 static void
2326 rcu_report_qs_rdp(struct rcu_data *rdp)
2327 {
2328 	unsigned long flags;
2329 	unsigned long mask;
2330 	bool needwake = false;
2331 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
2332 	struct rcu_node *rnp;
2333 
2334 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2335 	rnp = rdp->mynode;
2336 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2337 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2338 	    rdp->gpwrap) {
2339 
2340 		/*
2341 		 * The grace period in which this quiescent state was
2342 		 * recorded has ended, so don't report it upwards.
2343 		 * We will instead need a new quiescent state that lies
2344 		 * within the current grace period.
2345 		 */
2346 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2347 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2348 		return;
2349 	}
2350 	mask = rdp->grpmask;
2351 	rdp->core_needs_qs = false;
2352 	if ((rnp->qsmask & mask) == 0) {
2353 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2354 	} else {
2355 		/*
2356 		 * This GP can't end until cpu checks in, so all of our
2357 		 * callbacks can be processed during the next GP.
2358 		 */
2359 		if (!offloaded)
2360 			needwake = rcu_accelerate_cbs(rnp, rdp);
2361 
2362 		rcu_disable_urgency_upon_qs(rdp);
2363 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2364 		/* ^^^ Released rnp->lock */
2365 		if (needwake)
2366 			rcu_gp_kthread_wake();
2367 	}
2368 }
2369 
2370 /*
2371  * Check to see if there is a new grace period of which this CPU
2372  * is not yet aware, and if so, set up local rcu_data state for it.
2373  * Otherwise, see if this CPU has just passed through its first
2374  * quiescent state for this grace period, and record that fact if so.
2375  */
2376 static void
2377 rcu_check_quiescent_state(struct rcu_data *rdp)
2378 {
2379 	/* Check for grace-period ends and beginnings. */
2380 	note_gp_changes(rdp);
2381 
2382 	/*
2383 	 * Does this CPU still need to do its part for current grace period?
2384 	 * If no, return and let the other CPUs do their part as well.
2385 	 */
2386 	if (!rdp->core_needs_qs)
2387 		return;
2388 
2389 	/*
2390 	 * Was there a quiescent state since the beginning of the grace
2391 	 * period? If no, then exit and wait for the next call.
2392 	 */
2393 	if (rdp->cpu_no_qs.b.norm)
2394 		return;
2395 
2396 	/*
2397 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2398 	 * judge of that).
2399 	 */
2400 	rcu_report_qs_rdp(rdp);
2401 }
2402 
2403 /*
2404  * Near the end of the offline process.  Trace the fact that this CPU
2405  * is going offline.
2406  */
2407 int rcutree_dying_cpu(unsigned int cpu)
2408 {
2409 	bool blkd;
2410 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2411 	struct rcu_node *rnp = rdp->mynode;
2412 
2413 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2414 		return 0;
2415 
2416 	blkd = !!(rnp->qsmask & rdp->grpmask);
2417 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2418 			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2419 	return 0;
2420 }
2421 
2422 /*
2423  * All CPUs for the specified rcu_node structure have gone offline,
2424  * and all tasks that were preempted within an RCU read-side critical
2425  * section while running on one of those CPUs have since exited their RCU
2426  * read-side critical section.  Some other CPU is reporting this fact with
2427  * the specified rcu_node structure's ->lock held and interrupts disabled.
2428  * This function therefore goes up the tree of rcu_node structures,
2429  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2430  * the leaf rcu_node structure's ->qsmaskinit field has already been
2431  * updated.
2432  *
2433  * This function does check that the specified rcu_node structure has
2434  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2435  * prematurely.  That said, invoking it after the fact will cost you
2436  * a needless lock acquisition.  So once it has done its work, don't
2437  * invoke it again.
2438  */
2439 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2440 {
2441 	long mask;
2442 	struct rcu_node *rnp = rnp_leaf;
2443 
2444 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2445 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2446 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2447 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2448 		return;
2449 	for (;;) {
2450 		mask = rnp->grpmask;
2451 		rnp = rnp->parent;
2452 		if (!rnp)
2453 			break;
2454 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2455 		rnp->qsmaskinit &= ~mask;
2456 		/* Between grace periods, so better already be zero! */
2457 		WARN_ON_ONCE(rnp->qsmask);
2458 		if (rnp->qsmaskinit) {
2459 			raw_spin_unlock_rcu_node(rnp);
2460 			/* irqs remain disabled. */
2461 			return;
2462 		}
2463 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2464 	}
2465 }
2466 
2467 /*
2468  * The CPU has been completely removed, and some other CPU is reporting
2469  * this fact from process context.  Do the remainder of the cleanup.
2470  * There can only be one CPU hotplug operation at a time, so no need for
2471  * explicit locking.
2472  */
2473 int rcutree_dead_cpu(unsigned int cpu)
2474 {
2475 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2476 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2477 
2478 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2479 		return 0;
2480 
2481 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2482 	/* Adjust any no-longer-needed kthreads. */
2483 	rcu_boost_kthread_setaffinity(rnp, -1);
2484 	/* Do any needed no-CB deferred wakeups from this CPU. */
2485 	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2486 
2487 	// Stop-machine done, so allow nohz_full to disable tick.
2488 	tick_dep_clear(TICK_DEP_BIT_RCU);
2489 	return 0;
2490 }
2491 
2492 /*
2493  * Invoke any RCU callbacks that have made it to the end of their grace
2494  * period.  Thottle as specified by rdp->blimit.
2495  */
2496 static void rcu_do_batch(struct rcu_data *rdp)
2497 {
2498 	int div;
2499 	bool __maybe_unused empty;
2500 	unsigned long flags;
2501 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
2502 	struct rcu_head *rhp;
2503 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2504 	long bl, count = 0;
2505 	long pending, tlimit = 0;
2506 
2507 	/* If no callbacks are ready, just return. */
2508 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2509 		trace_rcu_batch_start(rcu_state.name,
2510 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2511 		trace_rcu_batch_end(rcu_state.name, 0,
2512 				    !rcu_segcblist_empty(&rdp->cblist),
2513 				    need_resched(), is_idle_task(current),
2514 				    rcu_is_callbacks_kthread());
2515 		return;
2516 	}
2517 
2518 	/*
2519 	 * Extract the list of ready callbacks, disabling interrupts to prevent
2520 	 * races with call_rcu() from interrupt handlers.  Leave the
2521 	 * callback counts, as rcu_barrier() needs to be conservative.
2522 	 */
2523 	local_irq_save(flags);
2524 	rcu_nocb_lock(rdp);
2525 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2526 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2527 	div = READ_ONCE(rcu_divisor);
2528 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2529 	bl = max(rdp->blimit, pending >> div);
2530 	if (unlikely(bl > 100)) {
2531 		long rrn = READ_ONCE(rcu_resched_ns);
2532 
2533 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2534 		tlimit = local_clock() + rrn;
2535 	}
2536 	trace_rcu_batch_start(rcu_state.name,
2537 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2538 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2539 	if (offloaded)
2540 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2541 
2542 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2543 	rcu_nocb_unlock_irqrestore(rdp, flags);
2544 
2545 	/* Invoke callbacks. */
2546 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2547 	rhp = rcu_cblist_dequeue(&rcl);
2548 
2549 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2550 		rcu_callback_t f;
2551 
2552 		count++;
2553 		debug_rcu_head_unqueue(rhp);
2554 
2555 		rcu_lock_acquire(&rcu_callback_map);
2556 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2557 
2558 		f = rhp->func;
2559 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2560 		f(rhp);
2561 
2562 		rcu_lock_release(&rcu_callback_map);
2563 
2564 		/*
2565 		 * Stop only if limit reached and CPU has something to do.
2566 		 */
2567 		if (count >= bl && !offloaded &&
2568 		    (need_resched() ||
2569 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2570 			break;
2571 		if (unlikely(tlimit)) {
2572 			/* only call local_clock() every 32 callbacks */
2573 			if (likely((count & 31) || local_clock() < tlimit))
2574 				continue;
2575 			/* Exceeded the time limit, so leave. */
2576 			break;
2577 		}
2578 		if (!in_serving_softirq()) {
2579 			local_bh_enable();
2580 			lockdep_assert_irqs_enabled();
2581 			cond_resched_tasks_rcu_qs();
2582 			lockdep_assert_irqs_enabled();
2583 			local_bh_disable();
2584 		}
2585 	}
2586 
2587 	local_irq_save(flags);
2588 	rcu_nocb_lock(rdp);
2589 	rdp->n_cbs_invoked += count;
2590 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2591 			    is_idle_task(current), rcu_is_callbacks_kthread());
2592 
2593 	/* Update counts and requeue any remaining callbacks. */
2594 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2595 	rcu_segcblist_add_len(&rdp->cblist, -count);
2596 
2597 	/* Reinstate batch limit if we have worked down the excess. */
2598 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2599 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2600 		rdp->blimit = blimit;
2601 
2602 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2603 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2604 		rdp->qlen_last_fqs_check = 0;
2605 		rdp->n_force_qs_snap = rcu_state.n_force_qs;
2606 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2607 		rdp->qlen_last_fqs_check = count;
2608 
2609 	/*
2610 	 * The following usually indicates a double call_rcu().  To track
2611 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2612 	 */
2613 	empty = rcu_segcblist_empty(&rdp->cblist);
2614 	WARN_ON_ONCE(count == 0 && !empty);
2615 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2616 		     count != 0 && empty);
2617 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2618 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2619 
2620 	rcu_nocb_unlock_irqrestore(rdp, flags);
2621 
2622 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2623 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2624 		invoke_rcu_core();
2625 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2626 }
2627 
2628 /*
2629  * This function is invoked from each scheduling-clock interrupt,
2630  * and checks to see if this CPU is in a non-context-switch quiescent
2631  * state, for example, user mode or idle loop.  It also schedules RCU
2632  * core processing.  If the current grace period has gone on too long,
2633  * it will ask the scheduler to manufacture a context switch for the sole
2634  * purpose of providing a providing the needed quiescent state.
2635  */
2636 void rcu_sched_clock_irq(int user)
2637 {
2638 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2639 	lockdep_assert_irqs_disabled();
2640 	raw_cpu_inc(rcu_data.ticks_this_gp);
2641 	/* The load-acquire pairs with the store-release setting to true. */
2642 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2643 		/* Idle and userspace execution already are quiescent states. */
2644 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2645 			set_tsk_need_resched(current);
2646 			set_preempt_need_resched();
2647 		}
2648 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2649 	}
2650 	rcu_flavor_sched_clock_irq(user);
2651 	if (rcu_pending(user))
2652 		invoke_rcu_core();
2653 	lockdep_assert_irqs_disabled();
2654 
2655 	trace_rcu_utilization(TPS("End scheduler-tick"));
2656 }
2657 
2658 /*
2659  * Scan the leaf rcu_node structures.  For each structure on which all
2660  * CPUs have reported a quiescent state and on which there are tasks
2661  * blocking the current grace period, initiate RCU priority boosting.
2662  * Otherwise, invoke the specified function to check dyntick state for
2663  * each CPU that has not yet reported a quiescent state.
2664  */
2665 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2666 {
2667 	int cpu;
2668 	unsigned long flags;
2669 	unsigned long mask;
2670 	struct rcu_data *rdp;
2671 	struct rcu_node *rnp;
2672 
2673 	rcu_state.cbovld = rcu_state.cbovldnext;
2674 	rcu_state.cbovldnext = false;
2675 	rcu_for_each_leaf_node(rnp) {
2676 		cond_resched_tasks_rcu_qs();
2677 		mask = 0;
2678 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2679 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2680 		if (rnp->qsmask == 0) {
2681 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2682 				/*
2683 				 * No point in scanning bits because they
2684 				 * are all zero.  But we might need to
2685 				 * priority-boost blocked readers.
2686 				 */
2687 				rcu_initiate_boost(rnp, flags);
2688 				/* rcu_initiate_boost() releases rnp->lock */
2689 				continue;
2690 			}
2691 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2692 			continue;
2693 		}
2694 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2695 			rdp = per_cpu_ptr(&rcu_data, cpu);
2696 			if (f(rdp)) {
2697 				mask |= rdp->grpmask;
2698 				rcu_disable_urgency_upon_qs(rdp);
2699 			}
2700 		}
2701 		if (mask != 0) {
2702 			/* Idle/offline CPUs, report (releases rnp->lock). */
2703 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2704 		} else {
2705 			/* Nothing to do here, so just drop the lock. */
2706 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2707 		}
2708 	}
2709 }
2710 
2711 /*
2712  * Force quiescent states on reluctant CPUs, and also detect which
2713  * CPUs are in dyntick-idle mode.
2714  */
2715 void rcu_force_quiescent_state(void)
2716 {
2717 	unsigned long flags;
2718 	bool ret;
2719 	struct rcu_node *rnp;
2720 	struct rcu_node *rnp_old = NULL;
2721 
2722 	/* Funnel through hierarchy to reduce memory contention. */
2723 	rnp = __this_cpu_read(rcu_data.mynode);
2724 	for (; rnp != NULL; rnp = rnp->parent) {
2725 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2726 		       !raw_spin_trylock(&rnp->fqslock);
2727 		if (rnp_old != NULL)
2728 			raw_spin_unlock(&rnp_old->fqslock);
2729 		if (ret)
2730 			return;
2731 		rnp_old = rnp;
2732 	}
2733 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2734 
2735 	/* Reached the root of the rcu_node tree, acquire lock. */
2736 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2737 	raw_spin_unlock(&rnp_old->fqslock);
2738 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2739 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2740 		return;  /* Someone beat us to it. */
2741 	}
2742 	WRITE_ONCE(rcu_state.gp_flags,
2743 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2744 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2745 	rcu_gp_kthread_wake();
2746 }
2747 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2748 
2749 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2750 // grace periods.
2751 static void strict_work_handler(struct work_struct *work)
2752 {
2753 	rcu_read_lock();
2754 	rcu_read_unlock();
2755 }
2756 
2757 /* Perform RCU core processing work for the current CPU.  */
2758 static __latent_entropy void rcu_core(void)
2759 {
2760 	unsigned long flags;
2761 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2762 	struct rcu_node *rnp = rdp->mynode;
2763 	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2764 
2765 	if (cpu_is_offline(smp_processor_id()))
2766 		return;
2767 	trace_rcu_utilization(TPS("Start RCU core"));
2768 	WARN_ON_ONCE(!rdp->beenonline);
2769 
2770 	/* Report any deferred quiescent states if preemption enabled. */
2771 	if (!(preempt_count() & PREEMPT_MASK)) {
2772 		rcu_preempt_deferred_qs(current);
2773 	} else if (rcu_preempt_need_deferred_qs(current)) {
2774 		set_tsk_need_resched(current);
2775 		set_preempt_need_resched();
2776 	}
2777 
2778 	/* Update RCU state based on any recent quiescent states. */
2779 	rcu_check_quiescent_state(rdp);
2780 
2781 	/* No grace period and unregistered callbacks? */
2782 	if (!rcu_gp_in_progress() &&
2783 	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2784 		rcu_nocb_lock_irqsave(rdp, flags);
2785 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2786 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2787 		rcu_nocb_unlock_irqrestore(rdp, flags);
2788 	}
2789 
2790 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2791 
2792 	/* If there are callbacks ready, invoke them. */
2793 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2794 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
2795 		rcu_do_batch(rdp);
2796 
2797 	/* Do any needed deferred wakeups of rcuo kthreads. */
2798 	do_nocb_deferred_wakeup(rdp);
2799 	trace_rcu_utilization(TPS("End RCU core"));
2800 
2801 	// If strict GPs, schedule an RCU reader in a clean environment.
2802 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2803 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2804 }
2805 
2806 static void rcu_core_si(struct softirq_action *h)
2807 {
2808 	rcu_core();
2809 }
2810 
2811 static void rcu_wake_cond(struct task_struct *t, int status)
2812 {
2813 	/*
2814 	 * If the thread is yielding, only wake it when this
2815 	 * is invoked from idle.
2816 	 */
2817 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2818 		wake_up_process(t);
2819 }
2820 
2821 static void invoke_rcu_core_kthread(void)
2822 {
2823 	struct task_struct *t;
2824 	unsigned long flags;
2825 
2826 	local_irq_save(flags);
2827 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2828 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2829 	if (t != NULL && t != current)
2830 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2831 	local_irq_restore(flags);
2832 }
2833 
2834 /*
2835  * Wake up this CPU's rcuc kthread to do RCU core processing.
2836  */
2837 static void invoke_rcu_core(void)
2838 {
2839 	if (!cpu_online(smp_processor_id()))
2840 		return;
2841 	if (use_softirq)
2842 		raise_softirq(RCU_SOFTIRQ);
2843 	else
2844 		invoke_rcu_core_kthread();
2845 }
2846 
2847 static void rcu_cpu_kthread_park(unsigned int cpu)
2848 {
2849 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2850 }
2851 
2852 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2853 {
2854 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2855 }
2856 
2857 /*
2858  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2859  * the RCU softirq used in configurations of RCU that do not support RCU
2860  * priority boosting.
2861  */
2862 static void rcu_cpu_kthread(unsigned int cpu)
2863 {
2864 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2865 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2866 	int spincnt;
2867 
2868 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2869 	for (spincnt = 0; spincnt < 10; spincnt++) {
2870 		local_bh_disable();
2871 		*statusp = RCU_KTHREAD_RUNNING;
2872 		local_irq_disable();
2873 		work = *workp;
2874 		*workp = 0;
2875 		local_irq_enable();
2876 		if (work)
2877 			rcu_core();
2878 		local_bh_enable();
2879 		if (*workp == 0) {
2880 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2881 			*statusp = RCU_KTHREAD_WAITING;
2882 			return;
2883 		}
2884 	}
2885 	*statusp = RCU_KTHREAD_YIELDING;
2886 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2887 	schedule_timeout_idle(2);
2888 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2889 	*statusp = RCU_KTHREAD_WAITING;
2890 }
2891 
2892 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2893 	.store			= &rcu_data.rcu_cpu_kthread_task,
2894 	.thread_should_run	= rcu_cpu_kthread_should_run,
2895 	.thread_fn		= rcu_cpu_kthread,
2896 	.thread_comm		= "rcuc/%u",
2897 	.setup			= rcu_cpu_kthread_setup,
2898 	.park			= rcu_cpu_kthread_park,
2899 };
2900 
2901 /*
2902  * Spawn per-CPU RCU core processing kthreads.
2903  */
2904 static int __init rcu_spawn_core_kthreads(void)
2905 {
2906 	int cpu;
2907 
2908 	for_each_possible_cpu(cpu)
2909 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2910 	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2911 		return 0;
2912 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2913 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2914 	return 0;
2915 }
2916 early_initcall(rcu_spawn_core_kthreads);
2917 
2918 /*
2919  * Handle any core-RCU processing required by a call_rcu() invocation.
2920  */
2921 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2922 			    unsigned long flags)
2923 {
2924 	/*
2925 	 * If called from an extended quiescent state, invoke the RCU
2926 	 * core in order to force a re-evaluation of RCU's idleness.
2927 	 */
2928 	if (!rcu_is_watching())
2929 		invoke_rcu_core();
2930 
2931 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2932 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2933 		return;
2934 
2935 	/*
2936 	 * Force the grace period if too many callbacks or too long waiting.
2937 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2938 	 * if some other CPU has recently done so.  Also, don't bother
2939 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2940 	 * is the only one waiting for a grace period to complete.
2941 	 */
2942 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2943 		     rdp->qlen_last_fqs_check + qhimark)) {
2944 
2945 		/* Are we ignoring a completed grace period? */
2946 		note_gp_changes(rdp);
2947 
2948 		/* Start a new grace period if one not already started. */
2949 		if (!rcu_gp_in_progress()) {
2950 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2951 		} else {
2952 			/* Give the grace period a kick. */
2953 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2954 			if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2955 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2956 				rcu_force_quiescent_state();
2957 			rdp->n_force_qs_snap = rcu_state.n_force_qs;
2958 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2959 		}
2960 	}
2961 }
2962 
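/*
 * Worked example of the hysteresis above, assuming the default qhimark
 * of 10000: ->qlen_last_fqs_check starts at zero, so the slow path is
 * first entered once more than 10000 callbacks are queued.  While a
 * grace period is in progress, that path records the current queue
 * length, so another 10000 callbacks must accumulate before the grace
 * period is kicked again.
 */
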
2963 /*
2964  * RCU callback function to leak a callback.
2965  */
2966 static void rcu_leak_callback(struct rcu_head *rhp)
2967 {
2968 }
2969 
2970 /*
2971  * Check and if necessary update the leaf rcu_node structure's
2972  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2973  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2974  * structure's ->lock.
2975  */
2976 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2977 {
2978 	raw_lockdep_assert_held_rcu_node(rnp);
2979 	if (qovld_calc <= 0)
2980 		return; // Early boot and wildcard value set.
2981 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2982 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2983 	else
2984 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2985 }
2986 
2987 /*
2988  * Check and if necessary update the leaf rcu_node structure's
2989  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2990  * number of queued RCU callbacks.  No locks need be held, but the
2991  * caller must have disabled interrupts.
2992  *
2993  * Note that this function ignores the possibility that there are a lot
2994  * of callbacks all of which have already seen the end of their respective
2995  * grace periods.  This omission is due to the need for no-CBs CPUs to
2996  * be holding ->nocb_lock to do this check, which is too heavy for a
2997  * common-case operation.
2998  */
2999 static void check_cb_ovld(struct rcu_data *rdp)
3000 {
3001 	struct rcu_node *const rnp = rdp->mynode;
3002 
3003 	if (qovld_calc <= 0 ||
3004 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3005 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3006 		return; // Early boot wildcard value or already set correctly.
3007 	raw_spin_lock_rcu_node(rnp);
3008 	check_cb_ovld_locked(rdp, rnp);
3009 	raw_spin_unlock_rcu_node(rnp);
3010 }
3011 
3012 /* Helper function for call_rcu() and friends.  */
3013 static void
3014 __call_rcu(struct rcu_head *head, rcu_callback_t func)
3015 {
3016 	static atomic_t doublefrees;
3017 	unsigned long flags;
3018 	struct rcu_data *rdp;
3019 	bool was_alldone;
3020 
3021 	/* Misaligned rcu_head! */
3022 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3023 
3024 	if (debug_rcu_head_queue(head)) {
3025 		/*
3026 		 * Probable double call_rcu(), so leak the callback.
3027 		 * Use rcu:rcu_callback trace event to find the previous
3028 		 * time callback was passed to __call_rcu().
3029 		 */
3030 		if (atomic_inc_return(&doublefrees) < 4) {
3031 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
3032 			mem_dump_obj(head);
3033 		}
3034 		WRITE_ONCE(head->func, rcu_leak_callback);
3035 		return;
3036 	}
3037 	head->func = func;
3038 	head->next = NULL;
3039 	local_irq_save(flags);
3040 	kasan_record_aux_stack(head);
3041 	rdp = this_cpu_ptr(&rcu_data);
3042 
3043 	/* Add the callback to our list. */
3044 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3045 		// This can trigger due to call_rcu() from offline CPU:
3046 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3047 		WARN_ON_ONCE(!rcu_is_watching());
3048 		// Very early boot, before rcu_init().  Initialize if needed
3049 		// and then drop through to queue the callback.
3050 		if (rcu_segcblist_empty(&rdp->cblist))
3051 			rcu_segcblist_init(&rdp->cblist);
3052 	}
3053 
3054 	check_cb_ovld(rdp);
3055 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
3056 		return; // Enqueued onto ->nocb_bypass, so just leave.
3057 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
3058 	rcu_segcblist_enqueue(&rdp->cblist, head);
3059 	if (__is_kvfree_rcu_offset((unsigned long)func))
3060 		trace_rcu_kvfree_callback(rcu_state.name, head,
3061 					 (unsigned long)func,
3062 					 rcu_segcblist_n_cbs(&rdp->cblist));
3063 	else
3064 		trace_rcu_callback(rcu_state.name, head,
3065 				   rcu_segcblist_n_cbs(&rdp->cblist));
3066 
3067 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
3068 
3069 	/* Go handle any RCU core processing required. */
3070 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
3071 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3072 	} else {
3073 		__call_rcu_core(rdp, head, flags);
3074 		local_irq_restore(flags);
3075 	}
3076 }
3077 
3078 /**
3079  * call_rcu() - Queue an RCU callback for invocation after a grace period.
3080  * @head: structure to be used for queueing the RCU updates.
3081  * @func: actual callback function to be invoked after the grace period
3082  *
3083  * The callback function will be invoked some time after a full grace
3084  * period elapses, in other words after all pre-existing RCU read-side
3085  * critical sections have completed.  However, the callback function
3086  * might well execute concurrently with RCU read-side critical sections
3087  * that started after call_rcu() was invoked.  RCU read-side critical
3088  * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
3089  * may be nested.  In addition, regions of code across which interrupts,
3090  * preemption, or softirqs have been disabled also serve as RCU read-side
3091  * critical sections.  This includes hardware interrupt handlers, softirq
3092  * handlers, and NMI handlers.
3093  *
3094  * Note that all CPUs must agree that the grace period extended beyond
3095  * all pre-existing RCU read-side critical sections.  On systems with more
3096  * than one CPU, this means that when "func()" is invoked, each CPU is
3097  * guaranteed to have executed a full memory barrier since the end of its
3098  * last RCU read-side critical section whose beginning preceded the call
3099  * to call_rcu().  It also means that each CPU executing an RCU read-side
3100  * critical section that continues beyond the start of "func()" must have
3101  * executed a memory barrier after the call_rcu() but before the beginning
3102  * of that RCU read-side critical section.  Note that these guarantees
3103  * include CPUs that are offline, idle, or executing in user mode, as
3104  * well as CPUs that are executing in the kernel.
3105  *
3106  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3107  * resulting RCU callback function "func()", then both CPU A and CPU B are
3108  * guaranteed to execute a full memory barrier during the time interval
3109  * between the call to call_rcu() and the invocation of "func()" -- even
3110  * if CPU A and CPU B are the same CPU (but again only if the system has
3111  * more than one CPU).
3112  */
3113 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3114 {
3115 	__call_rcu(head, func);
3116 }
3117 EXPORT_SYMBOL_GPL(call_rcu);
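
/*
 * Illustrative sketch (not part of this file): the canonical call_rcu()
 * usage pattern embeds an rcu_head into the RCU-protected structure and
 * frees that structure from the callback once a grace period has
 * elapsed.  The struct and function names below are hypothetical:
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		// Unlink; pre-existing readers may still see fp.
 *		list_del_rcu(&fp->list);
 *		// Free only after all such readers are done.
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 */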
3118 
3119 
3120 /* Maximum number of jiffies to wait before draining a batch. */
3121 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3122 #define KFREE_N_BATCHES 2
3123 #define FREE_N_CHANNELS 2
3124 
3125 /**
3126  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3127  * @nr_records: Number of active pointers in the array
3128  * @next: Next bulk object in the block chain
3129  * @records: Array of the kvfree_rcu() pointers
3130  */
3131 struct kvfree_rcu_bulk_data {
3132 	unsigned long nr_records;
3133 	struct kvfree_rcu_bulk_data *next;
3134 	void *records[];
3135 };
3136 
3137 /*
3138  * This macro defines how many entries the "records" array
3139  * will contain.  It is sized so that a kvfree_rcu_bulk_data
3140  * structure, including its records[] array, fills exactly one page.
3141  */
3142 #define KVFREE_BULK_MAX_ENTR \
3143 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
3144 
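/*
 * For example, assuming a 4096-byte PAGE_SIZE, 8-byte pointers, and no
 * structure padding, sizeof(struct kvfree_rcu_bulk_data) is 16 bytes,
 * so KVFREE_BULK_MAX_ENTR evaluates to (4096 - 16) / 8 = 510 pointers
 * per block.
 */
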
3145 /**
3146  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3147  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3148  * @head_free: List of kfree_rcu() objects waiting for a grace period
3149  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3150  * @krcp: Pointer to the corresponding kfree_rcu_cpu structure
3151  */
3153 struct kfree_rcu_cpu_work {
3154 	struct rcu_work rcu_work;
3155 	struct rcu_head *head_free;
3156 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3157 	struct kfree_rcu_cpu *krcp;
3158 };
3159 
3160 /**
3161  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3162  * @head: List of kfree_rcu() objects not yet waiting for a grace period
3163  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3164  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3165  * @lock: Synchronize access to this structure
3166  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3167  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3168  * @initialized: The @rcu_work fields have been initialized
3169  * @count: Number of objects for which a grace period has not yet started
3170  * @bkvcache:
3171  *	A simple cache list that contains objects for reuse.  In
3172  *	order to save some per-cpu space the list is singly linked.
3173  *	Even though the llist itself is lockless, accesses to it must
3174  *	be protected by the per-cpu lock.
3175  * @page_cache_work: A work item used to refill the cache when it is empty
3176  * @work_in_progress: Indicates that @page_cache_work is running
3177  * @hrtimer: An hrtimer used to schedule @page_cache_work
3178  * @nr_bkv_objs: Number of objects currently cached in @bkvcache
3179  *
3180  * This is a per-CPU structure.  The reason that it is not included in
3181  * the rcu_data structure is to permit this code to be extracted from
3182  * the RCU files.  Such extraction could allow further optimization of
3183  * the interactions with the slab allocators.
3184  */
3185 struct kfree_rcu_cpu {
3186 	struct rcu_head *head;
3187 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3188 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3189 	raw_spinlock_t lock;
3190 	struct delayed_work monitor_work;
3191 	bool monitor_todo;
3192 	bool initialized;
3193 	int count;
3194 
3195 	struct work_struct page_cache_work;
3196 	atomic_t work_in_progress;
3197 	struct hrtimer hrtimer;
3198 
3199 	struct llist_head bkvcache;
3200 	int nr_bkv_objs;
3201 };
3202 
3203 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3204 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3205 };
3206 
3207 static __always_inline void
3208 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3209 {
3210 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3211 	int i;
3212 
3213 	for (i = 0; i < bhead->nr_records; i++)
3214 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3215 #endif
3216 }
3217 
3218 static inline struct kfree_rcu_cpu *
3219 krc_this_cpu_lock(unsigned long *flags)
3220 {
3221 	struct kfree_rcu_cpu *krcp;
3222 
3223 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3224 	krcp = this_cpu_ptr(&krc);
3225 	raw_spin_lock(&krcp->lock);
3226 
3227 	return krcp;
3228 }
3229 
3230 static inline void
3231 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3232 {
3233 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3234 }
3235 
3236 static inline struct kvfree_rcu_bulk_data *
3237 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3238 {
3239 	if (!krcp->nr_bkv_objs)
3240 		return NULL;
3241 
3242 	krcp->nr_bkv_objs--;
3243 	return (struct kvfree_rcu_bulk_data *)
3244 		llist_del_first(&krcp->bkvcache);
3245 }
3246 
3247 static inline bool
3248 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3249 	struct kvfree_rcu_bulk_data *bnode)
3250 {
3251 	// Check the limit.
3252 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3253 		return false;
3254 
3255 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3256 	krcp->nr_bkv_objs++;
3257 	return true;
3258 
3259 }
3260 
3261 /*
3262  * This function is invoked in workqueue context after a grace period.
3263  * It frees all the objects queued on ->bhead_free or ->head_free.
3264  */
3265 static void kfree_rcu_work(struct work_struct *work)
3266 {
3267 	unsigned long flags;
3268 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3269 	struct rcu_head *head, *next;
3270 	struct kfree_rcu_cpu *krcp;
3271 	struct kfree_rcu_cpu_work *krwp;
3272 	int i, j;
3273 
3274 	krwp = container_of(to_rcu_work(work),
3275 			    struct kfree_rcu_cpu_work, rcu_work);
3276 	krcp = krwp->krcp;
3277 
3278 	raw_spin_lock_irqsave(&krcp->lock, flags);
3279 	// Channels 1 and 2.
3280 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3281 		bkvhead[i] = krwp->bkvhead_free[i];
3282 		krwp->bkvhead_free[i] = NULL;
3283 	}
3284 
3285 	// Channel 3.
3286 	head = krwp->head_free;
3287 	krwp->head_free = NULL;
3288 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3289 
3290 	// Handle two first channels.
3291 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3292 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3293 			bnext = bkvhead[i]->next;
3294 			debug_rcu_bhead_unqueue(bkvhead[i]);
3295 
3296 			rcu_lock_acquire(&rcu_callback_map);
3297 			if (i == 0) { // kmalloc() / kfree().
3298 				trace_rcu_invoke_kfree_bulk_callback(
3299 					rcu_state.name, bkvhead[i]->nr_records,
3300 					bkvhead[i]->records);
3301 
3302 				kfree_bulk(bkvhead[i]->nr_records,
3303 					bkvhead[i]->records);
3304 			} else { // vmalloc() / vfree().
3305 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3306 					trace_rcu_invoke_kvfree_callback(
3307 						rcu_state.name,
3308 						bkvhead[i]->records[j], 0);
3309 
3310 					vfree(bkvhead[i]->records[j]);
3311 				}
3312 			}
3313 			rcu_lock_release(&rcu_callback_map);
3314 
3315 			raw_spin_lock_irqsave(&krcp->lock, flags);
3316 			if (put_cached_bnode(krcp, bkvhead[i]))
3317 				bkvhead[i] = NULL;
3318 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3319 
3320 			if (bkvhead[i])
3321 				free_page((unsigned long) bkvhead[i]);
3322 
3323 			cond_resched_tasks_rcu_qs();
3324 		}
3325 	}
3326 
3327 	/*
3328 	 * Emergency case only.  This path is taken when a page allocation
3329 	 * failed under low memory, so the "bulk" path could not be used.
3330 	 * Here, ->func holds the rcu_head's offset within the object.
3331 	 */
3332 	for (; head; head = next) {
3333 		unsigned long offset = (unsigned long)head->func;
3334 		void *ptr = (void *)head - offset;
3335 
3336 		next = head->next;
3337 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3338 		rcu_lock_acquire(&rcu_callback_map);
3339 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3340 
3341 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3342 			kvfree(ptr);
3343 
3344 		rcu_lock_release(&rcu_callback_map);
3345 		cond_resched_tasks_rcu_qs();
3346 	}
3347 }
3348 
3349 /*
3350  * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3351  *
3352  * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3353  * timeout has been reached.
3354  */
3355 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3356 {
3357 	struct kfree_rcu_cpu_work *krwp;
3358 	bool repeat = false;
3359 	int i, j;
3360 
3361 	lockdep_assert_held(&krcp->lock);
3362 
3363 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3364 		krwp = &(krcp->krw_arr[i]);
3365 
3366 		/*
3367 		 * Try to detach bkvhead or head and attach it to the
3368 		 * corresponding free channel, if that channel is available.
3369 		 * A previous RCU batch may still be in progress, in which
3370 		 * case another one cannot be queued immediately, so return
3371 		 * false to tell the caller to retry later.
3372 		 */
3373 		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3374 			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3375 				(krcp->head && !krwp->head_free)) {
3376 			// Channel 1 corresponds to SLAB ptrs.
3377 			// Channel 2 corresponds to vmalloc ptrs.
3378 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3379 				if (!krwp->bkvhead_free[j]) {
3380 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3381 					krcp->bkvhead[j] = NULL;
3382 				}
3383 			}
3384 
3385 			// Channel 3 corresponds to emergency path.
3386 			if (!krwp->head_free) {
3387 				krwp->head_free = krcp->head;
3388 				krcp->head = NULL;
3389 			}
3390 
3391 			WRITE_ONCE(krcp->count, 0);
3392 
3393 			/*
3394 			 * There is one work item per batch, and each batch
3395 			 * handles all three "free channels".  The work may
3396 			 * already be pending if the channels were detached
3397 			 * one after another by successive invocations of
3398 			 * this function.
3399 			 */
3400 			queue_rcu_work(system_wq, &krwp->rcu_work);
3401 		}
3402 
3403 		// Repeat if any corresponding "free" channel is still busy.
3404 		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
3405 			repeat = true;
3406 	}
3407 
3408 	return !repeat;
3409 }
3410 
3411 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3412 					  unsigned long flags)
3413 {
3414 	// Attempt to start a new batch.
3415 	krcp->monitor_todo = false;
3416 	if (queue_kfree_rcu_work(krcp)) {
3417 		// Success! Our job is done here.
3418 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3419 		return;
3420 	}
3421 
3422 	// Previous RCU batch still in progress, try again later.
3423 	krcp->monitor_todo = true;
3424 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3425 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3426 }
3427 
3428 /*
3429  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3430  * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3431  */
3432 static void kfree_rcu_monitor(struct work_struct *work)
3433 {
3434 	unsigned long flags;
3435 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3436 						 monitor_work.work);
3437 
3438 	raw_spin_lock_irqsave(&krcp->lock, flags);
3439 	if (krcp->monitor_todo)
3440 		kfree_rcu_drain_unlock(krcp, flags);
3441 	else
3442 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3443 }
3444 
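/*
 * The hrtimer handler below runs in hard-irq context, where a sleeping
 * GFP_KERNEL page allocation is illegal.  It therefore only bounces the
 * actual cache refill to process context by queueing page_cache_work on
 * a high-priority workqueue.
 */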
3445 static enum hrtimer_restart
3446 schedule_page_work_fn(struct hrtimer *t)
3447 {
3448 	struct kfree_rcu_cpu *krcp =
3449 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3450 
3451 	queue_work(system_highpri_wq, &krcp->page_cache_work);
3452 	return HRTIMER_NORESTART;
3453 }
3454 
3455 static void fill_page_cache_func(struct work_struct *work)
3456 {
3457 	struct kvfree_rcu_bulk_data *bnode;
3458 	struct kfree_rcu_cpu *krcp =
3459 		container_of(work, struct kfree_rcu_cpu,
3460 			page_cache_work);
3461 	unsigned long flags;
3462 	bool pushed;
3463 	int i;
3464 
3465 	for (i = 0; i < rcu_min_cached_objs; i++) {
3466 		bnode = (struct kvfree_rcu_bulk_data *)
3467 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3468 
3469 		if (bnode) {
3470 			raw_spin_lock_irqsave(&krcp->lock, flags);
3471 			pushed = put_cached_bnode(krcp, bnode);
3472 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3473 
3474 			if (!pushed) {
3475 				free_page((unsigned long) bnode);
3476 				break;
3477 			}
3478 		}
3479 	}
3480 
3481 	atomic_set(&krcp->work_in_progress, 0);
3482 }
3483 
3484 static void
3485 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3486 {
3487 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3488 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3489 		hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
3490 			HRTIMER_MODE_REL);
3491 		krcp->hrtimer.function = schedule_page_work_fn;
3492 		hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3493 	}
3494 }
3495 
3496 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3497 // state specified by flags.  If can_alloc is true, the caller must
3498 // be schedulable and not be holding any locks or mutexes that might be
3499 // acquired by the memory allocator or anything that it might invoke.
3500 // Returns true if ptr was successfully recorded, else the caller must
3501 // use a fallback.
3502 static inline bool
3503 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3504 	unsigned long *flags, void *ptr, bool can_alloc)
3505 {
3506 	struct kvfree_rcu_bulk_data *bnode;
3507 	int idx;
3508 
3509 	*krcp = krc_this_cpu_lock(flags);
3510 	if (unlikely(!(*krcp)->initialized))
3511 		return false;
3512 
3513 	idx = !!is_vmalloc_addr(ptr);
3514 
3515 	/* Check if a new block is required. */
3516 	if (!(*krcp)->bkvhead[idx] ||
3517 			(*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3518 		bnode = get_cached_bnode(*krcp);
3519 		if (!bnode && can_alloc) {
3520 			krc_this_cpu_unlock(*krcp, *flags);
3521 
3522 			// __GFP_NORETRY - permits only lightweight direct reclaim,
3523 			// which minimizes how often this fallback path is hit.
3524 			// It also forbids invoking the OOM killer, which is
3525 			// beneficial given that this code is about to release
3526 			// memory anyway.
3527 			//
3528 			// __GFP_NOMEMALLOC - prevents consuming all of the
3529 			// memory reserves.  Note that a fallback path exists.
3530 			//
3531 			// __GFP_NOWARN - suppresses allocation-failure warnings;
3532 			// failure is expected under low memory or heavy pressure.
3533 			bnode = (struct kvfree_rcu_bulk_data *)
3534 				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3535 			*krcp = krc_this_cpu_lock(flags);
3536 		}
3537 
3538 		if (!bnode)
3539 			return false;
3540 
3541 		/* Initialize the new block. */
3542 		bnode->nr_records = 0;
3543 		bnode->next = (*krcp)->bkvhead[idx];
3544 
3545 		/* Attach it to the head. */
3546 		(*krcp)->bkvhead[idx] = bnode;
3547 	}
3548 
3549 	/* Finally insert. */
3550 	(*krcp)->bkvhead[idx]->records
3551 		[(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3552 
3553 	return true;
3554 }
3555 
3556 /*
3557  * Queue a request for lazy invocation of the appropriate free routine
3558  * after a grace period.  Note that three paths are maintained: the two
3559  * main ones use the array-of-pointers interface, and the third is an
3560  * emergency path used only when the main paths temporarily cannot be
3561  * maintained due to memory pressure.
3562  *
3563  * Each kvfree_call_rcu() request is added to a batch.  The batch is drained
3564  * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3565  * in workqueue context.  Batching requests together reduces the number of
3566  * grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3567  */
3568 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3569 {
3570 	unsigned long flags;
3571 	struct kfree_rcu_cpu *krcp;
3572 	bool success;
3573 	void *ptr;
3574 
3575 	if (head) {
3576 		ptr = (void *) head - (unsigned long) func;
3577 	} else {
3578 		/*
3579 		 * The head-less variant has a limitation, hence the
3580 		 * clear rule for such objects: it may be used only from
3581 		 * contexts where sleeping is legal, because the fallback
3582 		 * path may block in synchronize_rcu().  Elsewhere, embed
3583 		 * an rcu_head into your data instead.
3584 		 */
3585 		might_sleep();
3586 		ptr = (unsigned long *) func;
3587 	}
3588 
3589 	// Queue the object but don't yet schedule the batch.
3590 	if (debug_rcu_head_queue(ptr)) {
3591 		// Probable double kfree_rcu(), just leak.
3592 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3593 			  __func__, head);
3594 
3595 		// Mark as success and leave.
3596 		return;
3597 	}
3598 
3599 	kasan_record_aux_stack(ptr);
3600 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3601 	if (!success) {
3602 		run_page_cache_worker(krcp);
3603 
3604 		if (head == NULL)
3605 			// Inline if kvfree_rcu(one_arg) call.
3606 			goto unlock_return;
3607 
3608 		head->func = func;
3609 		head->next = krcp->head;
3610 		krcp->head = head;
3611 		success = true;
3612 	}
3613 
3614 	WRITE_ONCE(krcp->count, krcp->count + 1);
3615 
3616 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3617 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3618 	    !krcp->monitor_todo) {
3619 		krcp->monitor_todo = true;
3620 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3621 	}
3622 
3623 unlock_return:
3624 	krc_this_cpu_unlock(krcp, flags);
3625 
3626 	/*
3627 	 * Fall back to inline kvfree() after synchronize_rcu().
3628 	 * This is legal only from sleepable context, where the
3629 	 * current CPU can pass through a quiescent state.
3630 	 */
3631 	if (!success) {
3632 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3633 		synchronize_rcu();
3634 		kvfree(ptr);
3635 	}
3636 }
3637 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
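
/*
 * Illustrative sketch (not part of this file): kvfree_call_rcu() backs
 * both forms of kvfree_rcu().  The double-argument form names an
 * rcu_head embedded in the object and may be used from any context;
 * the single-argument (head-less) form may fall back to
 * synchronize_rcu() and is therefore legal only in sleepable context.
 * The struct and variable names below are hypothetical:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *	struct foo *fp;
 *	void *p;
 *
 *	kvfree_rcu(fp, rcu);	// Any context: rcu_head supplied.
 *
 *	p = kvmalloc(4096, GFP_KERNEL);
 *	...
 *	kvfree_rcu(p);		// Sleepable context only: no rcu_head.
 */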
3638 
3639 static unsigned long
3640 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3641 {
3642 	int cpu;
3643 	unsigned long count = 0;
3644 
3645 	/* Snapshot count of all CPUs */
3646 	for_each_possible_cpu(cpu) {
3647 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3648 
3649 		count += READ_ONCE(krcp->count);
3650 	}
3651 
3652 	return count;
3653 }
3654 
3655 static unsigned long
3656 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3657 {
3658 	int cpu, freed = 0;
3659 	unsigned long flags;
3660 
3661 	for_each_possible_cpu(cpu) {
3662 		int count;
3663 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3664 
3665 		count = krcp->count;
3666 		raw_spin_lock_irqsave(&krcp->lock, flags);
3667 		if (krcp->monitor_todo)
3668 			kfree_rcu_drain_unlock(krcp, flags);
3669 		else
3670 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3671 
3672 		sc->nr_to_scan -= count;
3673 		freed += count;
3674 
3675 		if (sc->nr_to_scan <= 0)
3676 			break;
3677 	}
3678 
3679 	return freed == 0 ? SHRINK_STOP : freed;
3680 }
3681 
3682 static struct shrinker kfree_rcu_shrinker = {
3683 	.count_objects = kfree_rcu_shrink_count,
3684 	.scan_objects = kfree_rcu_shrink_scan,
3685 	.batch = 0,
3686 	.seeks = DEFAULT_SEEKS,
3687 };
3688 
3689 void __init kfree_rcu_scheduler_running(void)
3690 {
3691 	int cpu;
3692 	unsigned long flags;
3693 
3694 	for_each_possible_cpu(cpu) {
3695 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3696 
3697 		raw_spin_lock_irqsave(&krcp->lock, flags);
3698 		if (!krcp->head || krcp->monitor_todo) {
3699 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3700 			continue;
3701 		}
3702 		krcp->monitor_todo = true;
3703 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
3704 					 KFREE_DRAIN_JIFFIES);
3705 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3706 	}
3707 }
3708 
3709 /*
3710  * During early boot, any blocking grace-period wait automatically
3711  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3712  *
3713  * However, because a context switch is a grace period for !PREEMPTION, any
3714  * blocking grace-period wait automatically implies a grace period if
3715  * there is only one CPU online at any point in time during execution of
3716  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3717  * occasionally incorrectly indicate that there are multiple CPUs online
3718  * when there was in fact only one the whole time, as this just adds some
3719  * overhead: RCU still operates correctly.
3720  */
3721 static int rcu_blocking_is_gp(void)
3722 {
3723 	int ret;
3724 
3725 	if (IS_ENABLED(CONFIG_PREEMPTION))
3726 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3727 	might_sleep();  /* Check for RCU read-side critical section. */
3728 	preempt_disable();
3729 	/*
3730 	 * If the rcu_state.n_online_cpus counter is equal to one,
3731 	 * there is only one CPU, and that CPU sees all prior accesses
3732 	 * made by any CPU that was online at the time of its access.
3733 	 * Furthermore, if this counter is equal to one, its value cannot
3734 	 * change until after the preempt_enable() below.
3735 	 *
3736 	 * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3737 	 * all later CPUs (both this one and any that come online later
3738 	 * on) are guaranteed to see all accesses prior to this point
3739 	 * in the code, without the need for additional memory barriers.
3740 	 * Those memory barriers are provided by CPU-hotplug code.
3741 	 */
3742 	ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3743 	preempt_enable();
3744 	return ret;
3745 }
3746 
3747 /**
3748  * synchronize_rcu - wait until a grace period has elapsed.
3749  *
3750  * Control will return to the caller some time after a full grace
3751  * period has elapsed, in other words after all currently executing RCU
3752  * read-side critical sections have completed.  Note, however, that
3753  * upon return from synchronize_rcu(), the caller might well be executing
3754  * concurrently with new RCU read-side critical sections that began while
3755  * synchronize_rcu() was waiting.  RCU read-side critical sections are
3756  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3757  * In addition, regions of code across which interrupts, preemption, or
3758  * softirqs have been disabled also serve as RCU read-side critical
3759  * sections.  This includes hardware interrupt handlers, softirq handlers,
3760  * and NMI handlers.
3761  *
3762  * Note that this guarantee implies further memory-ordering guarantees.
3763  * On systems with more than one CPU, when synchronize_rcu() returns,
3764  * each CPU is guaranteed to have executed a full memory barrier since
3765  * the end of its last RCU read-side critical section whose beginning
3766  * preceded the call to synchronize_rcu().  In addition, each CPU having
3767  * an RCU read-side critical section that extends beyond the return from
3768  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3769  * after the beginning of synchronize_rcu() and before the beginning of
3770  * that RCU read-side critical section.  Note that these guarantees include
3771  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3772  * that are executing in the kernel.
3773  *
3774  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3775  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3776  * to have executed a full memory barrier during the execution of
3777  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3778  * again only if the system has more than one CPU).
3779  */
3780 void synchronize_rcu(void)
3781 {
3782 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3783 			 lock_is_held(&rcu_lock_map) ||
3784 			 lock_is_held(&rcu_sched_lock_map),
3785 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3786 	if (rcu_blocking_is_gp())
3787 		return;  // Context allows vacuous grace periods.
3788 	if (rcu_gp_is_expedited())
3789 		synchronize_rcu_expedited();
3790 	else
3791 		wait_rcu_gp(call_rcu);
3792 }
3793 EXPORT_SYMBOL_GPL(synchronize_rcu);
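
/*
 * Illustrative sketch (not part of this file): a typical updater
 * publishes a new version of an RCU-protected structure, waits for a
 * grace period, and only then frees the old version.  The names below
 * are hypothetical:
 *
 *	static struct foo __rcu *gp;		// Readers use rcu_dereference().
 *	static DEFINE_SPINLOCK(gp_lock);	// Serializes updaters.
 *
 *	static void foo_update(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&gp_lock);
 *		old_fp = rcu_dereference_protected(gp,
 *					lockdep_is_held(&gp_lock));
 *		rcu_assign_pointer(gp, new_fp);	// Publish the new version.
 *		spin_unlock(&gp_lock);
 *		synchronize_rcu();	// Wait for pre-existing readers.
 *		kfree(old_fp);		// Now nobody can hold a reference.
 *	}
 */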
3794 
3795 /**
3796  * get_state_synchronize_rcu - Snapshot current RCU state
3797  *
3798  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3799  * or poll_state_synchronize_rcu() to determine whether or not a full
3800  * grace period has elapsed in the meantime.
3801  */
3802 unsigned long get_state_synchronize_rcu(void)
3803 {
3804 	/*
3805 	 * Any prior manipulation of RCU-protected data must happen
3806 	 * before the load from ->gp_seq.
3807 	 */
3808 	smp_mb();  /* ^^^ */
3809 	return rcu_seq_snap(&rcu_state.gp_seq);
3810 }
3811 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3812 
3813 /**
3814  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3815  *
3816  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3817  * or poll_state_synchronize_rcu() to determine whether or not a full
3818  * grace period has elapsed in the meantime.  If the needed grace period
3819  * is not already slated to start, notifies RCU core of the need for that
3820  * grace period.
3821  *
3822  * Interrupts must be enabled for the case where it is necessary to awaken
3823  * the grace-period kthread.
3824  */
3825 unsigned long start_poll_synchronize_rcu(void)
3826 {
3827 	unsigned long flags;
3828 	unsigned long gp_seq = get_state_synchronize_rcu();
3829 	bool needwake;
3830 	struct rcu_data *rdp;
3831 	struct rcu_node *rnp;
3832 
3833 	lockdep_assert_irqs_enabled();
3834 	local_irq_save(flags);
3835 	rdp = this_cpu_ptr(&rcu_data);
3836 	rnp = rdp->mynode;
3837 	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3838 	needwake = rcu_start_this_gp(rnp, rdp, gp_seq);
3839 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3840 	if (needwake)
3841 		rcu_gp_kthread_wake();
3842 	return gp_seq;
3843 }
3844 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3845 
3846 /**
3847  * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
3848  *
3849  * @oldstate: return from call to get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3850  *
3851  * If a full RCU grace period has elapsed since the earlier call from
3852  * which oldstate was obtained, return @true, otherwise return @false.
3853  * If @false is returned, it is the caller's responsibility to invoke this
3854  * function later on until it does return @true.  Alternatively, the caller
3855  * can explicitly wait for a grace period, for example, by passing @oldstate
3856  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3857  *
3858  * Yes, this function does not take counter wrap into account.
3859  * But counter wrap is harmless.  If the counter wraps, we have waited for
3860  * more than 2 billion grace periods (and way more on a 64-bit system!).
3861  * Those needing to keep oldstate values for very long time periods
3862  * (many hours even on 32-bit systems) should check them occasionally
3863  * and either refresh them or set a flag indicating that the grace period
3864  * has completed.
3865  */
3866 bool poll_state_synchronize_rcu(unsigned long oldstate)
3867 {
3868 	if (rcu_seq_done(&rcu_state.gp_seq, oldstate)) {
3869 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3870 		return true;
3871 	}
3872 	return false;
3873 }
3874 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3875 
3876 /**
3877  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3878  *
3879  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3880  *
3881  * If a full RCU grace period has elapsed since the earlier call to
3882  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3883  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3884  *
3885  * Yes, this function does not take counter wrap into account.  But
3886  * counter wrap is harmless.  If the counter wraps, we have waited for
3887  * more than 2 billion grace periods (and way more on a 64-bit system!),
3888  * so waiting for one additional grace period should be just fine.
3889  */
3890 void cond_synchronize_rcu(unsigned long oldstate)
3891 {
3892 	if (!poll_state_synchronize_rcu(oldstate))
3893 		synchronize_rcu();
3894 }
3895 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
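
/*
 * Illustrative sketch (not part of this file): the polling API lets an
 * updater avoid blocking when a grace period has already elapsed since
 * the state was snapshotted.  The old_fp pointer is hypothetical:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu();	// Snapshot, start a GP.
 *	...					// Do other work meanwhile.
 *	if (!poll_state_synchronize_rcu(cookie))	// Non-blocking check.
 *		cond_synchronize_rcu(cookie);		// Block only if needed.
 *	kfree(old_fp);	// A full grace period has now elapsed either way.
 */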
3896 
3897 /*
3898  * Check to see if there is any immediate RCU-related work to be done by
3899  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3900  * in order of increasing expense: checks that can be carried out against
3901  * CPU-local state are performed first.  However, we must check for CPU
3902  * stalls first, else we might not get a chance.
3903  */
3904 static int rcu_pending(int user)
3905 {
3906 	bool gp_in_progress;
3907 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3908 	struct rcu_node *rnp = rdp->mynode;
3909 
3910 	lockdep_assert_irqs_disabled();
3911 
3912 	/* Check for CPU stalls, if enabled. */
3913 	check_cpu_stall(rdp);
3914 
3915 	/* Does this CPU need a deferred NOCB wakeup? */
3916 	if (rcu_nocb_need_deferred_wakeup(rdp))
3917 		return 1;
3918 
3919 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3920 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3921 		return 0;
3922 
3923 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3924 	gp_in_progress = rcu_gp_in_progress();
3925 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3926 		return 1;
3927 
3928 	/* Does this CPU have callbacks ready to invoke? */
3929 	if (!rcu_rdp_is_offloaded(rdp) &&
3930 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3931 		return 1;
3932 
3933 	/* Has RCU gone idle with this CPU needing another grace period? */
3934 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3935 	    !rcu_rdp_is_offloaded(rdp) &&
3936 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3937 		return 1;
3938 
3939 	/* Have RCU grace period completed or started?  */
3940 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3941 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3942 		return 1;
3943 
3944 	/* nothing to do */
3945 	return 0;
3946 }
3947 
3948 /*
3949  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3950  * the compiler is expected to optimize this away.
3951  */
3952 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3953 {
3954 	trace_rcu_barrier(rcu_state.name, s, cpu,
3955 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3956 }
3957 
3958 /*
3959  * RCU callback function for rcu_barrier().  If we are last, wake
3960  * up the task executing rcu_barrier().
3961  *
3962  * Note that the value of rcu_state.barrier_sequence must be captured
3963  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3964  * other CPUs might count the value down to zero before this CPU gets
3965  * around to invoking rcu_barrier_trace(), which might result in bogus
3966  * data from the next instance of rcu_barrier().
3967  */
3968 static void rcu_barrier_callback(struct rcu_head *rhp)
3969 {
3970 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3971 
3972 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3973 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3974 		complete(&rcu_state.barrier_completion);
3975 	} else {
3976 		rcu_barrier_trace(TPS("CB"), -1, s);
3977 	}
3978 }
3979 
3980 /*
3981  * Called with preemption disabled, and from cross-cpu IRQ context.
3982  */
3983 static void rcu_barrier_func(void *cpu_in)
3984 {
3985 	uintptr_t cpu = (uintptr_t)cpu_in;
3986 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3987 
3988 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3989 	rdp->barrier_head.func = rcu_barrier_callback;
3990 	debug_rcu_head_queue(&rdp->barrier_head);
3991 	rcu_nocb_lock(rdp);
3992 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3993 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3994 		atomic_inc(&rcu_state.barrier_cpu_count);
3995 	} else {
3996 		debug_rcu_head_unqueue(&rdp->barrier_head);
3997 		rcu_barrier_trace(TPS("IRQNQ"), -1,
3998 				  rcu_state.barrier_sequence);
3999 	}
4000 	rcu_nocb_unlock(rdp);
4001 }
4002 
4003 /**
4004  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4005  *
4006  * Note that this primitive does not necessarily wait for an RCU grace period
4007  * to complete.  For example, if there are no RCU callbacks queued anywhere
4008  * in the system, then rcu_barrier() is within its rights to return
4009  * immediately, without waiting for anything, much less an RCU grace period.
4010  */
4011 void rcu_barrier(void)
4012 {
4013 	uintptr_t cpu;
4014 	struct rcu_data *rdp;
4015 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4016 
4017 	rcu_barrier_trace(TPS("Begin"), -1, s);
4018 
4019 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
4020 	mutex_lock(&rcu_state.barrier_mutex);
4021 
4022 	/* Did someone else do our work for us? */
4023 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4024 		rcu_barrier_trace(TPS("EarlyExit"), -1,
4025 				  rcu_state.barrier_sequence);
4026 		smp_mb(); /* caller's subsequent code after above check. */
4027 		mutex_unlock(&rcu_state.barrier_mutex);
4028 		return;
4029 	}
4030 
4031 	/* Mark the start of the barrier operation. */
4032 	rcu_seq_start(&rcu_state.barrier_sequence);
4033 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4034 
4035 	/*
4036 	 * Initialize the count to two rather than to zero in order
4037 	 * to avoid a too-soon return to zero in case of an immediate
4038 	 * invocation of the just-enqueued callback (or preemption of
4039 	 * this task).  Exclude CPU-hotplug operations to ensure that no
4040 	 * offline non-offloaded CPU has callbacks queued.
4041 	 */
4042 	init_completion(&rcu_state.barrier_completion);
4043 	atomic_set(&rcu_state.barrier_cpu_count, 2);
4044 	get_online_cpus();
4045 
4046 	/*
4047 	 * Force each CPU with callbacks to register a new callback.
4048 	 * When that callback is invoked, we will know that all of the
4049 	 * corresponding CPU's preceding callbacks have been invoked.
4050 	 */
4051 	for_each_possible_cpu(cpu) {
4052 		rdp = per_cpu_ptr(&rcu_data, cpu);
4053 		if (cpu_is_offline(cpu) &&
4054 		    !rcu_rdp_is_offloaded(rdp))
4055 			continue;
4056 		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
4057 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
4058 					  rcu_state.barrier_sequence);
4059 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
4060 		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
4061 			   cpu_is_offline(cpu)) {
4062 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
4063 					  rcu_state.barrier_sequence);
4064 			local_irq_disable();
4065 			rcu_barrier_func((void *)cpu);
4066 			local_irq_enable();
4067 		} else if (cpu_is_offline(cpu)) {
4068 			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
4069 					  rcu_state.barrier_sequence);
4070 		} else {
4071 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
4072 					  rcu_state.barrier_sequence);
4073 		}
4074 	}
4075 	put_online_cpus();
4076 
4077 	/*
4078 	 * Now that we have an rcu_barrier_callback() callback on each
4079 	 * CPU, and thus each counted, remove the initial count.
4080 	 */
4081 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4082 		complete(&rcu_state.barrier_completion);
4083 
4084 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4085 	wait_for_completion(&rcu_state.barrier_completion);
4086 
4087 	/* Mark the end of the barrier operation. */
4088 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4089 	rcu_seq_end(&rcu_state.barrier_sequence);
4090 
4091 	/* Other rcu_barrier() invocations can now safely proceed. */
4092 	mutex_unlock(&rcu_state.barrier_mutex);
4093 }
4094 EXPORT_SYMBOL_GPL(rcu_barrier);
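
/*
 * Illustrative sketch (not part of this file): the classic rcu_barrier()
 * use case is module unload, where in-flight callbacks may still
 * reference module code or data.  The foo_* names are hypothetical:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();	// No new call_rcu() requests.
 *		rcu_barrier();			// Wait for in-flight callbacks.
 *		kfree(foo_private_state);	// Now safe to tear down.
 *	}
 */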
4095 
4096 /*
4097  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4098  * first CPU in a given leaf rcu_node structure coming online.  The caller
4099  * must hold the corresponding leaf rcu_node ->lock with interrupts
4100  * disabled.
4101  */
4102 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4103 {
4104 	long mask;
4105 	long oldmask;
4106 	struct rcu_node *rnp = rnp_leaf;
4107 
4108 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4109 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4110 	for (;;) {
4111 		mask = rnp->grpmask;
4112 		rnp = rnp->parent;
4113 		if (rnp == NULL)
4114 			return;
4115 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4116 		oldmask = rnp->qsmaskinit;
4117 		rnp->qsmaskinit |= mask;
4118 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4119 		if (oldmask)
4120 			return;
4121 	}
4122 }
4123 
4124 /*
4125  * Do boot-time initialization of a CPU's per-CPU RCU data.
4126  */
4127 static void __init
4128 rcu_boot_init_percpu_data(int cpu)
4129 {
4130 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4131 
4132 	/* Set up local state, ensuring consistent view of global state. */
4133 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4134 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4135 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
4136 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
4137 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4138 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4139 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4140 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4141 	rdp->cpu = cpu;
4142 	rcu_boot_init_nocb_percpu_data(rdp);
4143 }
4144 
4145 /*
4146  * Invoked early in the CPU-online process, when pretty much all services
4147  * are available.  The incoming CPU is not present.
4148  *
4149  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4150  * offline event can be happening at a given time.  Note also that we can
4151  * accept some slop in the rcu_state.gp_seq access because this
4152  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4153  * And any offloaded callbacks are being numbered elsewhere.
4154  */
4155 int rcutree_prepare_cpu(unsigned int cpu)
4156 {
4157 	unsigned long flags;
4158 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4159 	struct rcu_node *rnp = rcu_get_root();
4160 
4161 	/* Set up local state, ensuring consistent view of global state. */
4162 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4163 	rdp->qlen_last_fqs_check = 0;
4164 	rdp->n_force_qs_snap = rcu_state.n_force_qs;
4165 	rdp->blimit = blimit;
4166 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4167 	rcu_dynticks_eqs_online();
4168 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4169 
4170 	/*
4171 	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4172 	 * (re-)initialized.
4173 	 */
4174 	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4175 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4176 
4177 	/*
4178 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4179 	 * propagation up the rcu_node tree will happen at the beginning
4180 	 * of the next grace period.
4181 	 */
4182 	rnp = rdp->mynode;
4183 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4184 	rdp->beenonline = true;	 /* We have now been online. */
4185 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4186 	rdp->gp_seq_needed = rdp->gp_seq;
4187 	rdp->cpu_no_qs.b.norm = true;
4188 	rdp->core_needs_qs = false;
4189 	rdp->rcu_iw_pending = false;
4190 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4191 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4192 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4193 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4194 	rcu_prepare_kthreads(cpu);
4195 	rcu_spawn_cpu_nocb_kthread(cpu);
4196 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4197 
4198 	return 0;
4199 }
4200 
4201 /*
4202  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4203  */
4204 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4205 {
4206 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4207 
4208 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4209 }
4210 
4211 /*
4212  * Near the end of the CPU-online process.  Pretty much all services
4213  * enabled, and the CPU is now very much alive.
4214  */
4215 int rcutree_online_cpu(unsigned int cpu)
4216 {
4217 	unsigned long flags;
4218 	struct rcu_data *rdp;
4219 	struct rcu_node *rnp;
4220 
4221 	rdp = per_cpu_ptr(&rcu_data, cpu);
4222 	rnp = rdp->mynode;
4223 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4224 	rnp->ffmask |= rdp->grpmask;
4225 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4226 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4227 		return 0; /* Too early in boot for scheduler work. */
4228 	sync_sched_exp_online_cleanup(cpu);
4229 	rcutree_affinity_setting(cpu, -1);
4230 
4231 	// Stop-machine done, so allow nohz_full to disable tick.
4232 	tick_dep_clear(TICK_DEP_BIT_RCU);
4233 	return 0;
4234 }
4235 
4236 /*
4237  * Near the beginning of the process.  The CPU is still very much alive
4238  * with pretty much all services enabled.
4239  */
4240 int rcutree_offline_cpu(unsigned int cpu)
4241 {
4242 	unsigned long flags;
4243 	struct rcu_data *rdp;
4244 	struct rcu_node *rnp;
4245 
4246 	rdp = per_cpu_ptr(&rcu_data, cpu);
4247 	rnp = rdp->mynode;
4248 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4249 	rnp->ffmask &= ~rdp->grpmask;
4250 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4251 
4252 	rcutree_affinity_setting(cpu, cpu);
4253 
4254 	// nohz_full CPUs need the tick for stop-machine to work quickly
4255 	tick_dep_set(TICK_DEP_BIT_RCU);
4256 	return 0;
4257 }
4258 
4259 /*
4260  * Mark the specified CPU as being online so that subsequent grace periods
4261  * (both expedited and normal) will wait on it.  Note that this means that
4262  * incoming CPUs are not allowed to use RCU read-side critical sections
4263  * until this function is called.  Failing to observe this restriction
4264  * will result in lockdep splats.
4265  *
4266  * Note that this function is special in that it is invoked directly
4267  * from the incoming CPU rather than from the cpuhp_step mechanism.
4268  * This is because this function must be invoked at a precise location.
4269  */
4270 void rcu_cpu_starting(unsigned int cpu)
4271 {
4272 	unsigned long flags;
4273 	unsigned long mask;
4274 	struct rcu_data *rdp;
4275 	struct rcu_node *rnp;
4276 	bool newcpu;
4277 
4278 	rdp = per_cpu_ptr(&rcu_data, cpu);
4279 	if (rdp->cpu_started)
4280 		return;
4281 	rdp->cpu_started = true;
4282 
4283 	rnp = rdp->mynode;
4284 	mask = rdp->grpmask;
4285 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4286 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4287 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4288 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4289 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4290 	newcpu = !(rnp->expmaskinitnext & mask);
4291 	rnp->expmaskinitnext |= mask;
4292 	/* Allow lockless access for expedited grace periods. */
4293 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4294 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4295 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4296 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4297 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4298 
4299 	/* An incoming CPU should never be blocking a grace period. */
4300 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4301 		rcu_disable_urgency_upon_qs(rdp);
4302 		/* Report QS -after- changing ->qsmaskinitnext! */
4303 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4304 	} else {
4305 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4306 	}
4307 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4308 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4309 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4310 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4311 }
4312 
4313 /*
4314  * The outgoing function has no further need of RCU, so remove it from
4315  * the rcu_node tree's ->qsmaskinitnext bit masks.
4316  *
4317  * Note that this function is special in that it is invoked directly
4318  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4319  * This is because this function must be invoked at a precise location.
4320  */
4321 void rcu_report_dead(unsigned int cpu)
4322 {
4323 	unsigned long flags;
4324 	unsigned long mask;
4325 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4326 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4327 
4328 	// Do any dangling deferred wakeups.
4329 	do_nocb_deferred_wakeup(rdp);
4330 
4331 	/* QS for any half-done expedited grace period. */
4332 	preempt_disable();
4333 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4334 	preempt_enable();
4335 	rcu_preempt_deferred_qs(current);
4336 
4337 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4338 	mask = rdp->grpmask;
4339 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4340 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4341 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4342 	raw_spin_lock(&rcu_state.ofl_lock);
4343 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4344 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4345 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4346 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4347 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4348 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4349 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4350 	}
4351 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4352 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4353 	raw_spin_unlock(&rcu_state.ofl_lock);
4354 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4355 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4356 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4357 
4358 	rdp->cpu_started = false;
4359 }
4360 
4361 #ifdef CONFIG_HOTPLUG_CPU
4362 /*
4363  * The outgoing CPU has just passed through the dying-idle state, and we
4364  * are being invoked from the CPU that was IPIed to continue the offline
4365  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4366  */
4367 void rcutree_migrate_callbacks(int cpu)
4368 {
4369 	unsigned long flags;
4370 	struct rcu_data *my_rdp;
4371 	struct rcu_node *my_rnp;
4372 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4373 	bool needwake;
4374 
4375 	if (rcu_rdp_is_offloaded(rdp) ||
4376 	    rcu_segcblist_empty(&rdp->cblist))
4377 		return;  /* No callbacks to migrate. */
4378 
4379 	local_irq_save(flags);
4380 	my_rdp = this_cpu_ptr(&rcu_data);
4381 	my_rnp = my_rdp->mynode;
4382 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4383 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4384 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4385 	/* Leverage recent GPs and set GP for new callbacks. */
4386 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4387 		   rcu_advance_cbs(my_rnp, my_rdp);
4388 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4389 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4390 	rcu_segcblist_disable(&rdp->cblist);
4391 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4392 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4393 	if (rcu_rdp_is_offloaded(my_rdp)) {
4394 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4395 		__call_rcu_nocb_wake(my_rdp, true, flags);
4396 	} else {
4397 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4398 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4399 	}
4400 	if (needwake)
4401 		rcu_gp_kthread_wake();
4402 	lockdep_assert_irqs_enabled();
4403 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4404 		  !rcu_segcblist_empty(&rdp->cblist),
4405 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4406 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4407 		  rcu_segcblist_first_cb(&rdp->cblist));
4408 }
4409 #endif
4410 
4411 /*
4412  * On non-huge systems, use expedited RCU grace periods to make suspend
4413  * and hibernation run faster.
4414  */
4415 static int rcu_pm_notify(struct notifier_block *self,
4416 			 unsigned long action, void *hcpu)
4417 {
4418 	switch (action) {
4419 	case PM_HIBERNATION_PREPARE:
4420 	case PM_SUSPEND_PREPARE:
4421 		rcu_expedite_gp();
4422 		break;
4423 	case PM_POST_HIBERNATION:
4424 	case PM_POST_SUSPEND:
4425 		rcu_unexpedite_gp();
4426 		break;
4427 	default:
4428 		break;
4429 	}
4430 	return NOTIFY_OK;
4431 }
4432 
4433 /*
4434  * Spawn the kthreads that handle RCU's grace periods.
4435  */
4436 static int __init rcu_spawn_gp_kthread(void)
4437 {
4438 	unsigned long flags;
4439 	int kthread_prio_in = kthread_prio;
4440 	struct rcu_node *rnp;
4441 	struct sched_param sp;
4442 	struct task_struct *t;
4443 
4444 	/* Force priority into range. */
4445 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4446 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4447 		kthread_prio = 2;
4448 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4449 		kthread_prio = 1;
4450 	else if (kthread_prio < 0)
4451 		kthread_prio = 0;
4452 	else if (kthread_prio > 99)
4453 		kthread_prio = 99;
4454 
4455 	if (kthread_prio != kthread_prio_in)
4456 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4457 			 kthread_prio, kthread_prio_in);
4458 
4459 	rcu_scheduler_fully_active = 1;
4460 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4461 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4462 		return 0;
4463 	if (kthread_prio) {
4464 		sp.sched_priority = kthread_prio;
4465 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4466 	}
4467 	rnp = rcu_get_root();
4468 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4469 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4470 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4471 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4472 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4473 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4474 	wake_up_process(t);
4475 	rcu_spawn_nocb_kthreads();
4476 	rcu_spawn_boost_kthreads();
4477 	return 0;
4478 }
4479 early_initcall(rcu_spawn_gp_kthread);
4480 
4481 /*
4482  * This function is invoked towards the end of the scheduler's
4483  * initialization process.  Before this is called, the idle task might
4484  * contain synchronous grace-period primitives (during which time, this idle
4485  * task is booting the system, and such primitives are no-ops).  After this
4486  * function is called, any synchronous grace-period primitives are run as
4487  * expedited, with the requesting task driving the grace period forward.
4488  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4489  * runtime RCU functionality.
4490  */
4491 void rcu_scheduler_starting(void)
4492 {
4493 	WARN_ON(num_online_cpus() != 1);
4494 	WARN_ON(nr_context_switches() > 0);
4495 	rcu_test_sync_prims();
4496 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4497 	rcu_test_sync_prims();
4498 }
4499 
4500 /*
4501  * Helper function for rcu_init() that initializes the rcu_state structure.
4502  */
4503 static void __init rcu_init_one(void)
4504 {
4505 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4506 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4507 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4508 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4509 
4510 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4511 	int cpustride = 1;
4512 	int i;
4513 	int j;
4514 	struct rcu_node *rnp;
4515 
4516 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4517 
4518 	/* Silence gcc 4.8 false positive about array index out of range. */
4519 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4520 		panic("rcu_init_one: rcu_num_lvls out of range");
4521 
4522 	/* Initialize the level-tracking arrays. */
4523 
4524 	for (i = 1; i < rcu_num_lvls; i++)
4525 		rcu_state.level[i] =
4526 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4527 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4528 
4529 	/* Initialize the elements themselves, starting from the leaves. */
4530 
4531 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4532 		cpustride *= levelspread[i];
4533 		rnp = rcu_state.level[i];
4534 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4535 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4536 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4537 						   &rcu_node_class[i], buf[i]);
4538 			raw_spin_lock_init(&rnp->fqslock);
4539 			lockdep_set_class_and_name(&rnp->fqslock,
4540 						   &rcu_fqs_class[i], fqs[i]);
4541 			rnp->gp_seq = rcu_state.gp_seq;
4542 			rnp->gp_seq_needed = rcu_state.gp_seq;
4543 			rnp->completedqs = rcu_state.gp_seq;
4544 			rnp->qsmask = 0;
4545 			rnp->qsmaskinit = 0;
4546 			rnp->grplo = j * cpustride;
4547 			rnp->grphi = (j + 1) * cpustride - 1;
4548 			if (rnp->grphi >= nr_cpu_ids)
4549 				rnp->grphi = nr_cpu_ids - 1;
4550 			if (i == 0) {
4551 				rnp->grpnum = 0;
4552 				rnp->grpmask = 0;
4553 				rnp->parent = NULL;
4554 			} else {
4555 				rnp->grpnum = j % levelspread[i - 1];
4556 				rnp->grpmask = BIT(rnp->grpnum);
4557 				rnp->parent = rcu_state.level[i - 1] +
4558 					      j / levelspread[i - 1];
4559 			}
4560 			rnp->level = i;
4561 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4562 			rcu_init_one_nocb(rnp);
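			/*
			 * Four expedited-GP wait queues, indexed by the
			 * low-order bits of the expedited-GP sequence
			 * number; the matching wakeups are in tree_exp.h.
			 */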
4563 			init_waitqueue_head(&rnp->exp_wq[0]);
4564 			init_waitqueue_head(&rnp->exp_wq[1]);
4565 			init_waitqueue_head(&rnp->exp_wq[2]);
4566 			init_waitqueue_head(&rnp->exp_wq[3]);
4567 			spin_lock_init(&rnp->exp_lock);
4568 		}
4569 	}
4570 
4571 	init_swait_queue_head(&rcu_state.gp_wq);
4572 	init_swait_queue_head(&rcu_state.expedited_wq);
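	/*
	 * Point each possible CPU's rcu_data at the leaf rcu_node whose
	 * [grplo, grphi] range covers it, then perform that CPU's
	 * boot-time initialization.
	 */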
4573 	rnp = rcu_first_leaf_node();
4574 	for_each_possible_cpu(i) {
4575 		while (i > rnp->grphi)
4576 			rnp++;
4577 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4578 		rcu_boot_init_percpu_data(i);
4579 	}
4580 }
4581 
4582 /*
4583  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4584  * replace the definitions in tree.h because those are needed to size
4585  * the ->node array in the rcu_state structure.
4586  */
4587 static void __init rcu_init_geometry(void)
4588 {
4589 	ulong d;
4590 	int i;
4591 	int rcu_capacity[RCU_NUM_LVLS];
4592 
4593 	/*
4594 	 * Initialize any unspecified boot parameters.
4595 	 * The default values of jiffies_till_first_fqs and
4596 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4597 	 * value, which is a function of HZ, plus one jiffy for each
4598 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4599 	 */
4600 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4601 	if (jiffies_till_first_fqs == ULONG_MAX)
4602 		jiffies_till_first_fqs = d;
4603 	if (jiffies_till_next_fqs == ULONG_MAX)
4604 		jiffies_till_next_fqs = d;
4605 	adjust_jiffies_till_sched_qs();
4606 
4607 	/* If the compile-time values are accurate, just leave. */
4608 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4609 	    nr_cpu_ids == NR_CPUS)
4610 		return;
4611 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4612 		rcu_fanout_leaf, nr_cpu_ids);
4613 
4614 	/*
4615 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4616 	 * and cannot exceed the number of bits in the rcu_node masks.
4617 	 * Complain and fall back to the compile-time values if this
4618 	 * limit is exceeded.
4619 	 */
4620 	if (rcu_fanout_leaf < 2 ||
4621 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4622 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4623 		WARN_ON(1);
4624 		return;
4625 	}
4626 
4627 	/*
4628 	 * Compute the number of CPUs that can be handled by an rcu_node tree
4629 	 * with the given number of levels.
4630 	 */
4631 	rcu_capacity[0] = rcu_fanout_leaf;
4632 	for (i = 1; i < RCU_NUM_LVLS; i++)
4633 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4634 
4635 	/*
4636 	 * The tree must be able to accommodate the configured number of CPUs.
4637 	 * If this limit is exceeded, fall back to the compile-time values.
4638 	 */
4639 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4640 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4641 		WARN_ON(1);
4642 		return;
4643 	}
4644 
4645 	/* Calculate the number of levels in the tree. */
4646 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4647 	}
4648 	rcu_num_lvls = i + 1;
4649 
4650 	/* Calculate the number of rcu_nodes at each level of the tree. */
4651 	for (i = 0; i < rcu_num_lvls; i++) {
4652 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4653 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4654 	}
4655 
4656 	/* Calculate the total number of rcu_node structures. */
4657 	rcu_num_nodes = 0;
4658 	for (i = 0; i < rcu_num_lvls; i++)
4659 		rcu_num_nodes += num_rcu_lvl[i];
4660 }
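
/*
 * A worked example, assuming the 64-bit defaults of RCU_FANOUT == 64 and
 * RCU_FANOUT_LEAF == 16, with a hypothetical nr_cpu_ids == 1000:
 *
 *	rcu_capacity[0] = 16;		// One level handles 16 CPUs.
 *	rcu_capacity[1] = 16 * 64;	// Two levels handle 1024 CPUs.
 *
 * Because 16 < 1000 <= 1024, rcu_num_lvls == 2.  The root level then gets
 * num_rcu_lvl[0] = DIV_ROUND_UP(1000, 1024) == 1 node, the leaf level gets
 * num_rcu_lvl[1] = DIV_ROUND_UP(1000, 16) == 63 nodes, and so
 * rcu_num_nodes == 64.
 */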
4661 
4662 /*
4663  * Dump out the structure of the rcu_node combining tree associated
4664  * with the rcu_state structure.
4665  */
4666 static void __init rcu_dump_rcu_node_tree(void)
4667 {
4668 	int level = 0;
4669 	struct rcu_node *rnp;
4670 
4671 	pr_info("rcu_node tree layout dump\n");
4672 	pr_info(" ");
4673 	rcu_for_each_node_breadth_first(rnp) {
4674 		if (rnp->level != level) {
4675 			pr_cont("\n");
4676 			pr_info(" ");
4677 			level = rnp->level;
4678 		}
4679 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4680 	}
4681 	pr_cont("\n");
4682 }
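
/*
 * For the hypothetical 1000-CPU geometry worked out above, the dump would
 * look like this (root node first, then the 63 leaves):
 *
 *	rcu: rcu_node tree layout dump
 *	rcu:  0:999 ^0
 *	rcu:  0:15 ^0  16:31 ^1  32:47 ^2  ...
 */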
4683 
4684 struct workqueue_struct *rcu_gp_wq;
4685 struct workqueue_struct *rcu_par_gp_wq;
4686 
4687 static void __init kfree_rcu_batch_init(void)
4688 {
4689 	int cpu;
4690 	int i;
4691 
4692 	for_each_possible_cpu(cpu) {
4693 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4694 
4695 		for (i = 0; i < KFREE_N_BATCHES; i++) {
4696 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4697 			krcp->krw_arr[i].krcp = krcp;
4698 		}
4699 
4700 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4701 		INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
4702 		krcp->initialized = true;
4703 	}
4704 	if (register_shrinker(&kfree_rcu_shrinker))
4705 		pr_err("Failed to register kfree_rcu() shrinker!\n");
4706 }
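
/*
 * A minimal (purely illustrative) sketch of the caller side of the
 * machinery initialized above: embed an rcu_head in the structure to be
 * freed and pass its field name to kfree_rcu(), which batches the
 * deferred kfree() through the per-CPU kfree_rcu_cpu state:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void release_foo(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rh);	// Freed only after a grace period.
 *	}
 */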
4707 
4708 void __init rcu_init(void)
4709 {
4710 	int cpu;
4711 
4712 	rcu_early_boot_tests();
4713 
4714 	kfree_rcu_batch_init();
4715 	rcu_bootup_announce();
4716 	rcu_init_geometry();
4717 	rcu_init_one();
4718 	if (dump_tree)
4719 		rcu_dump_rcu_node_tree();
4720 	if (use_softirq)
4721 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4722 
4723 	/*
4724 	 * We don't need protection against CPU-hotplug here because
4725 	 * this is called early in boot, before either interrupts
4726 	 * or the scheduler is operational.
4727 	 */
4728 	pm_notifier(rcu_pm_notify, 0);
4729 	for_each_online_cpu(cpu) {
4730 		rcutree_prepare_cpu(cpu);
4731 		rcu_cpu_starting(cpu);
4732 		rcutree_online_cpu(cpu);
4733 	}
4734 
4735 	/* Create workqueue for expedited GPs and for Tree SRCU. */
4736 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4737 	WARN_ON(!rcu_gp_wq);
4738 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4739 	WARN_ON(!rcu_par_gp_wq);
4740 	srcu_init();
4741 
4742 	/* Fill in default value for rcutree.qovld boot parameter. */
4743 	/* -After- the rcu_node ->lock fields are initialized! */
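	/* E.g., with the default qhimark of 10000, qovld_calc becomes 20000. */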
4744 	if (qovld < 0)
4745 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4746 	else
4747 		qovld_calc = qovld;
4748 }
4749 
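/*
 * These files are #included here, rather than compiled separately, so
 * that they can share tree.c's static functions and data as a single
 * translation unit.
 */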
4750 #include "tree_stall.h"
4751 #include "tree_exp.h"
4752 #include "tree_plugin.h"
4753