// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_RCU_NOCB_CPU
	.cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
#endif
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This rcu parameter is runtime-read-only. It reflects
 * a minimum allowed number of objects which can be cached
 * per-CPU. Object size is equal to one page. This value
 * can be changed at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

// A page shrinker can ask for pages to be freed to make them
// available for other parts of the system. This usually happens
// under low memory conditions, and in that case we should also
// defer page-cache filling for a short time period.
//
// The default value is 5 seconds, which is long enough to reduce
// interference with the shrinker while it asks other systems to
// drain their caches.
static int rcu_delay_page_cache_fill_msec = 5000;
module_param(rcu_delay_page_cache_fill_msec, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
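
/*
 * Illustrative arithmetic (an editorial sketch of the normalization
 * described above, not kernel code): with PER_RCU_NODE_PERIOD == 3, a
 * debug delay of d jiffies is applied once every 3 * d grace periods,
 * so the average added overhead is d / (3 * d) == 1/3 jiffy per grace
 * period no matter how large d is.  That is the sense in which the
 * overall slowdown is constant.
 */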

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
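
/*
 * Illustrative sketch (the encoding itself lives in kernel/rcu/rcu.h,
 * so details here are assumptions): ->gp_seq packs a grace-period
 * counter and a state field, with the low RCU_SEQ_CTR_SHIFT bits
 * holding the state.  Nonzero state means a grace period is in flight:
 *
 *	unsigned long seq = rcu_seq_current(&rcu_state.gp_seq);
 *	unsigned long ctr = seq >> RCU_SEQ_CTR_SHIFT;	// GP counter
 *	int state = rcu_seq_state(seq);	// 0: idle, else GP in progress
 */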

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}

/*
 * Increment the current CPU's rcu_data structure's ->dynticks field
 * with ordering.  Return the new value.
 */
static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
{
	return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = rcu_dynticks_inc(1);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = rcu_dynticks_inc(1);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & 0x1)
		return;
	rcu_dynticks_inc(1);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	return !(arch_atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	smp_mb();  // Fundamental RCU ordering guarantee.
	return atomic_read_acquire(&rdp->dynticks);
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & 0x1);
}
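
/*
 * Illustrative walk-through (invented values): the low-order bit of
 * ->dynticks encodes whether RCU is watching.  Starting from an odd
 * (watching) value of 5:
 *
 *	rcu_dynticks_eqs_enter();	// ->dynticks: 5 -> 6 (even: EQS)
 *	rcu_dynticks_eqs_exit();	// ->dynticks: 6 -> 7 (odd: watching)
 *
 * Comparing snapshots of this counter, as rcu_dynticks_in_eqs_since()
 * below does, thus detects that a CPU passed through a quiescent state
 * even if it is busy again by the time it is sampled.
 */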

/* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
bool rcu_is_idle_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~0x1;

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure.
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == atomic_read(&rdp->dynticks);
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	seq = rcu_dynticks_inc(2);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & 0x1));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
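
/*
 * Illustrative sketch (invented values): incrementing ->dynticks by 2
 * leaves the low-order "watching" bit set while still changing the
 * counter, which is all that snapshot-based QS detection needs:
 *
 *	snap = rcu_dynticks_snap(rdp);		// e.g., 5 (odd: watching)
 *	rcu_momentary_dyntick_idle();		// ->dynticks: 5 -> 7
 *	rcu_dynticks_in_eqs_since(rdp, snap);	// 5 != 7, so QS credited
 */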

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods. This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
				// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
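
/*
 * Worked example (assumed values, not necessarily this system's): with
 * HZ == 1000, nr_cpu_ids == 64, RCU_JIFFIES_FQS_DIV == 256, and boot
 * parameters jiffies_till_first_fqs == 1 and jiffies_till_next_fqs == 1:
 *
 *	j = 1 + 2 * 1 = 3;			// third FQS scan
 *	floor = 1000 / 10 + 64 / 256 = 100;	// lower bound
 *	jiffies_to_sched_qs = 100;		// the floor wins
 */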

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
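
/*
 * Example boot-time usage (illustrative): because MODULE_PARAM_PREFIX is
 * "rcutree.", the parameters above appear on the kernel command line as,
 * for instance:
 *
 *	rcutree.jiffies_till_first_fqs=50 rcutree.rcu_kick_kthreads=1
 */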

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
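
/*
 * Illustrative caller sketch (a simplified idle loop, not the kernel's
 * actual generic idle code):
 *
 *	local_irq_disable();
 *	rcu_idle_enter();	// RCU stops watching this CPU ...
 *	arch_cpu_idle();	// ... while it naps ...
 *	rcu_idle_exit();	// ... and resumes watching on wakeup.
 *	local_irq_enable();
 */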

#ifdef CONFIG_NO_HZ_FULL

#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
/*
 * An empty function that will trigger a reschedule on
 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wakeups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr static void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}

#else
static inline void rcu_irq_work_resched(void) { }
#endif

/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Other than the generic entry implementation, we may be past the
	 * last rescheduling opportunity in the entry code. Trigger a self
	 * IPI that will fire and reschedule once we resume in user/guest
	 * mode.
	 */
	rcu_irq_work_resched();
	rcu_eqs_enter(true);
}

#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (!in_nmi())
		rcu_prepare_for_idle();

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}
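
/*
 * Illustrative bookkeeping (invented scenario): ->dynticks_nmi_nesting
 * for an NMI arriving within an IRQ handler on a non-idle CPU:
 *
 *	task running:	nesting == DYNTICK_IRQ_NONIDLE
 *	IRQ entry:	nesting += 2
 *	NMI entry:	nesting += 2
 *	NMI exit:	nesting -= 2	// != 1, so no EQS transition
 *	IRQ exit:	nesting -= 2	// back to DYNTICK_IRQ_NONIDLE
 *
 * Only when the count is exactly 1 (outermost handler that interrupted
 * an RCU-idle CPU) is it crowbarred to 0 and RCU stops watching.
 */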

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel, because the kernel can
 * run an RCU read-side critical section at any time.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(true);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (!in_nmi()) {
			instrumentation_begin();
			rcu_cleanup_after_idle();
			instrumentation_end();
		}

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}
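
/*
 * Illustrative walk-through (invented scenario): an idle CPU takes an
 * IRQ, and an NMI arrives inside the handler:
 *
 *	idle:		->dynticks_nmi_nesting == 0, RCU not watching
 *	IRQ entry:	EQS exit, incby == 1, nesting: 0 -> 1
 *	NMI entry:	already watching, nesting: 1 -> 3
 *	NMI exit:	nesting: 3 -> 1 (see rcu_nmi_exit() above)
 *	IRQ exit:	nesting == 1, crowbar to 0 and re-enter EQS
 *
 * The value 1 therefore uniquely marks the outermost handler that
 * interrupted an RCU-idle period.
 */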

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
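
/*
 * Illustrative usage sketch (hypothetical caller): code that might run
 * from the idle loop, such as a tracer, can guard its RCU usage:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		do_trace();	// hypothetical helper
 *		rcu_read_unlock();
 *	}
 */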

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
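
/*
 * Illustrative arithmetic (invented numbers): ULONG_CMP_LT() compares
 * modulo ULONG_MAX + 1, so the first check above fires once rnp->gp_seq
 * is more than ULONG_MAX / 4 ahead of the rcu_data value.  For example,
 * with rdp->gp_seq == 100 and rnp->gp_seq == 100 + ULONG_MAX / 4 + 8,
 * the sum 100 + ULONG_MAX / 4 is "less than" rnp->gp_seq, so ->gpwrap
 * is set and the stale rcu_data numbers are treated as invalid.
 */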

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state.  After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline.  This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
		bool onl;
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(&rdp->rcu_urgent_qs, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}
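
/*
 * Illustrative timeline (assuming jiffies_to_sched_qs == 100) for a CPU
 * that keeps looping in the kernel during a single grace period:
 *
 *	age 100 jiffies:  .rcu_urgent_qs set; cond_resched() reports a QS
 *	age 200 jiffies:  .rcu_need_heavy_qs also set; heavier help enlisted
 *	past jiffies_resched:  resched_cpu() IPIs, plus at most one
 *		irq_work per grace period to check for interrupt storms
 */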

/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP kthread needs to be awakened, else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_irq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
1486  * the RCU grace-period kthread.
1487  *
1488  * The caller must hold rnp->lock with interrupts disabled.
1489  */
1490 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1491 {
1492 	unsigned long gp_seq_req;
1493 	bool ret = false;
1494 
1495 	rcu_lockdep_assert_cblist_protected(rdp);
1496 	raw_lockdep_assert_held_rcu_node(rnp);
1497 
1498 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1499 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1500 		return false;
1501 
1502 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1503 
1504 	/*
1505 	 * Callbacks are often registered with incomplete grace-period
1506 	 * information.  Something about the fact that getting exact
1507 	 * information requires acquiring a global lock...  RCU therefore
1508 	 * makes a conservative estimate of the grace period number at which
1509 	 * a given callback will become ready to invoke.  The following
1510 	 * code checks this estimate and improves it when possible, thus
1511 	 * accelerating callback invocation to an earlier grace-period
1512 	 * number.
1513 	 */
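	/*
	 * Illustrative arithmetic (assuming the two low-order state bits
	 * defined by RCU_SEQ_CTR_SHIFT in rcu.h): if ->gp_seq is 0x10
	 * (counter 4, idle), rcu_seq_snap() returns 0x14, the value that
	 * ->gp_seq will reach when the next grace period completes.  If
	 * ->gp_seq is instead 0x11 (counter 4, grace period in progress),
	 * the snapshot is 0x18, because the already-running grace period
	 * cannot be relied upon to cover newly arrived callbacks.
	 */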
1514 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1515 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1516 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1517 
1518 	/* Trace depending on how much we were able to accelerate. */
1519 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1520 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1521 	else
1522 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1523 
1524 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1525 
1526 	return ret;
1527 }
1528 
1529 /*
1530  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1531  * rcu_node structure's ->lock be held.  It consults the cached value
1532  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1533  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1534  * while holding the leaf rcu_node structure's ->lock.
1535  */
1536 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1537 					struct rcu_data *rdp)
1538 {
1539 	unsigned long c;
1540 	bool needwake;
1541 
1542 	rcu_lockdep_assert_cblist_protected(rdp);
1543 	c = rcu_seq_snap(&rcu_state.gp_seq);
1544 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1545 		/* Old request still live, so mark recent callbacks. */
1546 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1547 		return;
1548 	}
1549 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1550 	needwake = rcu_accelerate_cbs(rnp, rdp);
1551 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1552 	if (needwake)
1553 		rcu_gp_kthread_wake();
1554 }
1555 
1556 /*
1557  * Move any callbacks whose grace period has completed to the
1558  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1559  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1560  * sublist.  This function is idempotent, so it does not hurt to
1561  * invoke it repeatedly.  As long as it is not invoked -too- often...
1562  * Returns true if the RCU grace-period kthread needs to be awakened.
1563  *
1564  * The caller must hold rnp->lock with interrupts disabled.
1565  */
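/*
 * For reference, the four ->cblist sublists involved (a sketch based on
 * the segment names in rcu_segcblist.h):
 *
 *	RCU_DONE_TAIL:       callbacks whose grace period has ended
 *	RCU_WAIT_TAIL:       callbacks waiting for the current grace period
 *	RCU_NEXT_READY_TAIL: callbacks waiting for the next grace period
 *	RCU_NEXT_TAIL:       callbacks not yet associated with a grace period
 */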
1566 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1567 {
1568 	rcu_lockdep_assert_cblist_protected(rdp);
1569 	raw_lockdep_assert_held_rcu_node(rnp);
1570 
1571 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1572 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1573 		return false;
1574 
1575 	/*
1576 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1577 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1578 	 */
1579 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1580 
1581 	/* Classify any remaining callbacks. */
1582 	return rcu_accelerate_cbs(rnp, rdp);
1583 }
1584 
1585 /*
1586  * Move and classify callbacks, but only if doing so won't require
1587  * that the RCU grace-period kthread be awakened.
1588  */
1589 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1590 						  struct rcu_data *rdp)
1591 {
1592 	rcu_lockdep_assert_cblist_protected(rdp);
1593 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1594 	    !raw_spin_trylock_rcu_node(rnp))
1595 		return;
1596 	WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1597 	raw_spin_unlock_rcu_node(rnp);
1598 }
1599 
1600 /*
1601  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1602  * quiescent state.  This is intended to be invoked when the CPU notices
1603  * a new grace period.
1604  */
1605 static void rcu_strict_gp_check_qs(void)
1606 {
1607 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
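		/*
		 * An empty critical section suffices here: exiting it
		 * gives strict-grace-period kernels an immediate
		 * opportunity to report the quiescent state.
		 */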
1608 		rcu_read_lock();
1609 		rcu_read_unlock();
1610 	}
1611 }
1612 
1613 /*
1614  * Update CPU-local rcu_data state to record the beginnings and ends of
1615  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1616  * structure corresponding to the current CPU, and must have irqs disabled.
1617  * Returns true if the grace-period kthread needs to be awakened.
1618  */
1619 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1620 {
1621 	bool ret = false;
1622 	bool need_qs;
1623 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
1624 
1625 	raw_lockdep_assert_held_rcu_node(rnp);
1626 
1627 	if (rdp->gp_seq == rnp->gp_seq)
1628 		return false; /* Nothing to do. */
1629 
1630 	/* Handle the ends of any preceding grace periods first. */
1631 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1632 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1633 		if (!offloaded)
1634 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1635 		rdp->core_needs_qs = false;
1636 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1637 	} else {
1638 		if (!offloaded)
1639 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1640 		if (rdp->core_needs_qs)
1641 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1642 	}
1643 
1644 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1645 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1646 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1647 		/*
1648 		 * If the current grace period is waiting for this CPU,
1649 		 * set up to detect a quiescent state, otherwise don't
1650 		 * go looking for one.
1651 		 */
1652 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1653 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1654 		rdp->cpu_no_qs.b.norm = need_qs;
1655 		rdp->core_needs_qs = need_qs;
1656 		zero_cpu_stall_ticks(rdp);
1657 	}
1658 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1659 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1660 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1661 	WRITE_ONCE(rdp->gpwrap, false);
1662 	rcu_gpnum_ovf(rnp, rdp);
1663 	return ret;
1664 }
1665 
1666 static void note_gp_changes(struct rcu_data *rdp)
1667 {
1668 	unsigned long flags;
1669 	bool needwake;
1670 	struct rcu_node *rnp;
1671 
1672 	local_irq_save(flags);
1673 	rnp = rdp->mynode;
1674 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1675 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1676 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1677 		local_irq_restore(flags);
1678 		return;
1679 	}
1680 	needwake = __note_gp_changes(rnp, rdp);
1681 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1682 	rcu_strict_gp_check_qs();
1683 	if (needwake)
1684 		rcu_gp_kthread_wake();
1685 }
1686 
1687 static void rcu_gp_slow(int delay)
1688 {
1689 	if (delay > 0 &&
1690 	    !(rcu_seq_ctr(rcu_state.gp_seq) %
1691 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1692 		schedule_timeout_idle(delay);
1693 }
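
/*
 * Worked example (illustrative numbers, assuming PER_RCU_NODE_PERIOD is 3
 * as in tree.h): with delay = 5 and rcu_num_nodes = 2, the grace-period
 * kthread sleeps for 5 jiffies once every 2 * 3 * 5 = 30 grace periods.
 */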
1694 
1695 static unsigned long sleep_duration;
1696 
1697 /* Allow rcutorture to stall the grace-period kthread. */
1698 void rcu_gp_set_torture_wait(int duration)
1699 {
1700 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1701 		WRITE_ONCE(sleep_duration, duration);
1702 }
1703 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1704 
1705 /* Actually implement the aforementioned wait. */
1706 static void rcu_gp_torture_wait(void)
1707 {
1708 	unsigned long duration;
1709 
1710 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1711 		return;
1712 	duration = xchg(&sleep_duration, 0UL);
1713 	if (duration > 0) {
1714 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1715 		schedule_timeout_idle(duration);
1716 		pr_alert("%s: Wait complete\n", __func__);
1717 	}
1718 }
1719 
1720 /*
1721  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1722  * processing.
1723  */
1724 static void rcu_strict_gp_boundary(void *unused)
1725 {
1726 	invoke_rcu_core();
1727 }
1728 
1729 /*
1730  * Initialize a new grace period.  Return false if no grace period required.
1731  */
1732 static noinline_for_stack bool rcu_gp_init(void)
1733 {
1734 	unsigned long firstseq;
1735 	unsigned long flags;
1736 	unsigned long oldmask;
1737 	unsigned long mask;
1738 	struct rcu_data *rdp;
1739 	struct rcu_node *rnp = rcu_get_root();
1740 
1741 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1742 	raw_spin_lock_irq_rcu_node(rnp);
1743 	if (!READ_ONCE(rcu_state.gp_flags)) {
1744 		/* Spurious wakeup, tell caller to go back to sleep.  */
1745 		raw_spin_unlock_irq_rcu_node(rnp);
1746 		return false;
1747 	}
1748 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1749 
1750 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1751 		/*
1752 		 * Grace period already in progress, don't start another.
1753 		 * Not supposed to be able to happen.
1754 		 */
1755 		raw_spin_unlock_irq_rcu_node(rnp);
1756 		return false;
1757 	}
1758 
1759 	/* Advance to a new grace period and initialize state. */
1760 	record_gp_stall_check_time();
1761 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1762 	rcu_seq_start(&rcu_state.gp_seq);
1763 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1764 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1765 	raw_spin_unlock_irq_rcu_node(rnp);
1766 
1767 	/*
1768 	 * Apply per-leaf buffered online and offline operations to
1769 	 * the rcu_node tree. Note that this new grace period need not
1770 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1771 	 * offlining path, when combined with checks in this function,
1772 	 * will handle CPUs that are currently going offline or that will
1773 	 * go offline later.  Please also refer to the "Hotplug CPU" section
1774 	 * of RCU's Requirements documentation.
1775 	 */
1776 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1777 	rcu_for_each_leaf_node(rnp) {
1778 		// Wait for CPU-hotplug operations that might have
1779 		// started before this grace period did.
1780 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1781 		firstseq = READ_ONCE(rnp->ofl_seq);
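		// An odd ->ofl_seq value means that a CPU-hotplug operation
		// is still in flight on this rcu_node structure.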
1782 		if (firstseq & 0x1)
1783 			while (firstseq == READ_ONCE(rnp->ofl_seq))
1784 				schedule_timeout_idle(1);  // Can't wake unless RCU is watching.
1785 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1786 		raw_spin_lock(&rcu_state.ofl_lock);
1787 		raw_spin_lock_irq_rcu_node(rnp);
1788 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1789 		    !rnp->wait_blkd_tasks) {
1790 			/* Nothing to do on this leaf rcu_node structure. */
1791 			raw_spin_unlock_irq_rcu_node(rnp);
1792 			raw_spin_unlock(&rcu_state.ofl_lock);
1793 			continue;
1794 		}
1795 
1796 		/* Record old state, apply changes to ->qsmaskinit field. */
1797 		oldmask = rnp->qsmaskinit;
1798 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1799 
1800 		/* If zero-ness of ->qsmaskinit changed ("!a != !b" computes logical XOR), propagate up tree. */
1801 		if (!oldmask != !rnp->qsmaskinit) {
1802 			if (!oldmask) { /* First online CPU for rcu_node. */
1803 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1804 					rcu_init_new_rnp(rnp);
1805 			} else if (rcu_preempt_has_tasks(rnp)) {
1806 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1807 			} else { /* Last offline CPU and can propagate. */
1808 				rcu_cleanup_dead_rnp(rnp);
1809 			}
1810 		}
1811 
1812 		/*
1813 		 * If all waited-on tasks from prior grace period are
1814 		 * done, and if all this rcu_node structure's CPUs are
1815 		 * still offline, propagate up the rcu_node tree and
1816 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1817 		 * rcu_node structure's CPUs has since come back online,
1818 		 * simply clear ->wait_blkd_tasks.
1819 		 */
1820 		if (rnp->wait_blkd_tasks &&
1821 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1822 			rnp->wait_blkd_tasks = false;
1823 			if (!rnp->qsmaskinit)
1824 				rcu_cleanup_dead_rnp(rnp);
1825 		}
1826 
1827 		raw_spin_unlock_irq_rcu_node(rnp);
1828 		raw_spin_unlock(&rcu_state.ofl_lock);
1829 	}
1830 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1831 
1832 	/*
1833 	 * Set the quiescent-state-needed bits in all the rcu_node
1834 	 * structures for all currently online CPUs in breadth-first
1835 	 * order, starting from the root rcu_node structure, relying on the
1836 	 * layout of the tree within the rcu_state.node[] array.  Note that
1837 	 * other CPUs will access only the leaves of the hierarchy, thus
1838 	 * seeing that no grace period is in progress, at least until the
1839 	 * corresponding leaf node has been initialized.
1840 	 *
1841 	 * The grace period cannot complete until the initialization
1842 	 * process finishes, because this kthread handles both.
1843 	 */
1844 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1845 	rcu_for_each_node_breadth_first(rnp) {
1846 		rcu_gp_slow(gp_init_delay);
1847 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1848 		rdp = this_cpu_ptr(&rcu_data);
1849 		rcu_preempt_check_blocked_tasks(rnp);
1850 		rnp->qsmask = rnp->qsmaskinit;
1851 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1852 		if (rnp == rdp->mynode)
1853 			(void)__note_gp_changes(rnp, rdp);
1854 		rcu_preempt_boost_start_gp(rnp);
1855 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1856 					    rnp->level, rnp->grplo,
1857 					    rnp->grphi, rnp->qsmask);
1858 		/* Quiescent states for tasks on any now-offline CPUs. */
1859 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1860 		rnp->rcu_gp_init_mask = mask;
1861 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1862 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1863 		else
1864 			raw_spin_unlock_irq_rcu_node(rnp);
1865 		cond_resched_tasks_rcu_qs();
1866 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1867 	}
1868 
1869 	// If strict, make all CPUs aware of new grace period.
1870 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1871 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1872 
1873 	return true;
1874 }
1875 
1876 /*
1877  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1878  * time.
1879  */
1880 static bool rcu_gp_fqs_check_wake(int *gfp)
1881 {
1882 	struct rcu_node *rnp = rcu_get_root();
1883 
1884 	// If under overload conditions, force an immediate FQS scan.
1885 	if (*gfp & RCU_GP_FLAG_OVLD)
1886 		return true;
1887 
1888 	// Someone like call_rcu() requested a force-quiescent-state scan.
1889 	*gfp = READ_ONCE(rcu_state.gp_flags);
1890 	if (*gfp & RCU_GP_FLAG_FQS)
1891 		return true;
1892 
1893 	// The current grace period has completed.
1894 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1895 		return true;
1896 
1897 	return false;
1898 }
1899 
1900 /*
1901  * Do one round of quiescent-state forcing.
1902  */
1903 static void rcu_gp_fqs(bool first_time)
1904 {
1905 	struct rcu_node *rnp = rcu_get_root();
1906 
1907 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1908 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1909 	if (first_time) {
1910 		/* Collect dyntick-idle snapshots. */
1911 		force_qs_rnp(dyntick_save_progress_counter);
1912 	} else {
1913 		/* Handle dyntick-idle and offline CPUs. */
1914 		force_qs_rnp(rcu_implicit_dynticks_qs);
1915 	}
1916 	/* Clear flag to prevent immediate re-entry. */
1917 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1918 		raw_spin_lock_irq_rcu_node(rnp);
1919 		WRITE_ONCE(rcu_state.gp_flags,
1920 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1921 		raw_spin_unlock_irq_rcu_node(rnp);
1922 	}
1923 }
1924 
1925 /*
1926  * Loop doing repeated quiescent-state forcing until the grace period ends.
1927  */
1928 static noinline_for_stack void rcu_gp_fqs_loop(void)
1929 {
1930 	bool first_gp_fqs;
1931 	int gf = 0;
1932 	unsigned long j;
1933 	int ret;
1934 	struct rcu_node *rnp = rcu_get_root();
1935 
1936 	first_gp_fqs = true;
1937 	j = READ_ONCE(jiffies_till_first_fqs);
1938 	if (rcu_state.cbovld)
1939 		gf = RCU_GP_FLAG_OVLD;
1940 	ret = 0;
1941 	for (;;) {
1942 		if (!ret) {
1943 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1944 			/*
1945 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1946 			 * update; required for stall checks.
1947 			 */
1948 			smp_wmb();
1949 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1950 				   jiffies + (j ? 3 * j : 2));
1951 		}
1952 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1953 				       TPS("fqswait"));
1954 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1955 		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1956 				 rcu_gp_fqs_check_wake(&gf), j);
1957 		rcu_gp_torture_wait();
1958 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1959 		/* Locking provides needed memory barriers. */
1960 		/* If grace period done, leave loop. */
1961 		if (!READ_ONCE(rnp->qsmask) &&
1962 		    !rcu_preempt_blocked_readers_cgp(rnp))
1963 			break;
1964 		/* If time for quiescent-state forcing, do it. */
1965 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1966 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1967 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1968 					       TPS("fqsstart"));
1969 			rcu_gp_fqs(first_gp_fqs);
1970 			gf = 0;
1971 			if (first_gp_fqs) {
1972 				first_gp_fqs = false;
1973 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1974 			}
1975 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1976 					       TPS("fqsend"));
1977 			cond_resched_tasks_rcu_qs();
1978 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1979 			ret = 0; /* Force full wait till next FQS. */
1980 			j = READ_ONCE(jiffies_till_next_fqs);
1981 		} else {
1982 			/* Deal with stray signal. */
1983 			cond_resched_tasks_rcu_qs();
1984 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1985 			WARN_ON(signal_pending(current));
1986 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1987 					       TPS("fqswaitsig"));
1988 			ret = 1; /* Keep old FQS timing. */
1989 			j = jiffies;
1990 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1991 				j = 1;
1992 			else
1993 				j = rcu_state.jiffies_force_qs - j;
1994 			gf = 0;
1995 		}
1996 	}
1997 }
1998 
1999 /*
2000  * Clean up after the old grace period.
2001  */
2002 static noinline void rcu_gp_cleanup(void)
2003 {
2004 	int cpu;
2005 	bool needgp = false;
2006 	unsigned long gp_duration;
2007 	unsigned long new_gp_seq;
2008 	bool offloaded;
2009 	struct rcu_data *rdp;
2010 	struct rcu_node *rnp = rcu_get_root();
2011 	struct swait_queue_head *sq;
2012 
2013 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
2014 	raw_spin_lock_irq_rcu_node(rnp);
2015 	rcu_state.gp_end = jiffies;
2016 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2017 	if (gp_duration > rcu_state.gp_max)
2018 		rcu_state.gp_max = gp_duration;
2019 
2020 	/*
2021 	 * We know the grace period is complete, but to everyone else
2022 	 * it appears to still be ongoing.  But it is also the case
2023 	 * that to everyone else it looks like there is nothing that
2024 	 * they can do to advance the grace period.  It is therefore
2025 	 * safe for us to drop the lock in order to mark the grace
2026 	 * period as completed in all of the rcu_node structures.
2027 	 */
2028 	raw_spin_unlock_irq_rcu_node(rnp);
2029 
2030 	/*
2031 	 * Propagate new ->gp_seq value to rcu_node structures so that
2032 	 * other CPUs don't have to wait until the start of the next grace
2033 	 * period to process their callbacks.  This also avoids some nasty
2034 	 * RCU grace-period initialization races by forcing the end of
2035 	 * the current grace period to be completely recorded in all of
2036 	 * the rcu_node structures before the beginning of the next grace
2037 	 * period is recorded in any of the rcu_node structures.
2038 	 */
2039 	new_gp_seq = rcu_state.gp_seq;
2040 	rcu_seq_end(&new_gp_seq);
2041 	rcu_for_each_node_breadth_first(rnp) {
2042 		raw_spin_lock_irq_rcu_node(rnp);
2043 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2044 			dump_blkd_tasks(rnp, 10);
2045 		WARN_ON_ONCE(rnp->qsmask);
2046 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2047 		rdp = this_cpu_ptr(&rcu_data);
2048 		if (rnp == rdp->mynode)
2049 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2050 		/* smp_mb() provided by prior unlock-lock pair. */
2051 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2052 		// Reset overload indication for CPUs no longer overloaded
2053 		if (rcu_is_leaf_node(rnp))
2054 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2055 				rdp = per_cpu_ptr(&rcu_data, cpu);
2056 				check_cb_ovld_locked(rdp, rnp);
2057 			}
2058 		sq = rcu_nocb_gp_get(rnp);
2059 		raw_spin_unlock_irq_rcu_node(rnp);
2060 		rcu_nocb_gp_cleanup(sq);
2061 		cond_resched_tasks_rcu_qs();
2062 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2063 		rcu_gp_slow(gp_cleanup_delay);
2064 	}
2065 	rnp = rcu_get_root();
2066 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2067 
2068 	/* Declare grace period done, trace first to use old GP number. */
2069 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2070 	rcu_seq_end(&rcu_state.gp_seq);
2071 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2072 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2073 	/* Check for GP requests since above loop. */
2074 	rdp = this_cpu_ptr(&rcu_data);
2075 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2076 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2077 				  TPS("CleanupMore"));
2078 		needgp = true;
2079 	}
2080 	/* Advance CBs to reduce false positives below. */
2081 	offloaded = rcu_rdp_is_offloaded(rdp);
2082 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2083 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2084 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2085 		trace_rcu_grace_period(rcu_state.name,
2086 				       rcu_state.gp_seq,
2087 				       TPS("newreq"));
2088 	} else {
2089 		WRITE_ONCE(rcu_state.gp_flags,
2090 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2091 	}
2092 	raw_spin_unlock_irq_rcu_node(rnp);
2093 
2094 	// If strict, make all CPUs aware of the end of the old grace period.
2095 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2096 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2097 }
2098 
2099 /*
2100  * Body of kthread that handles grace periods.
2101  */
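/*
 * Approximate ->gp_state progression per grace period (a sketch assembled
 * from the WRITE_ONCE() calls below and in rcu_gp_init(), rcu_gp_fqs_loop(),
 * and rcu_gp_cleanup()):
 *
 *	RCU_GP_WAIT_GPS -> RCU_GP_DONE_GPS -> RCU_GP_ONOFF -> RCU_GP_INIT ->
 *	{ RCU_GP_WAIT_FQS <-> RCU_GP_DOING_FQS } -> RCU_GP_CLEANUP ->
 *	RCU_GP_IDLE -> RCU_GP_CLEANED
 */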
2102 static int __noreturn rcu_gp_kthread(void *unused)
2103 {
2104 	rcu_bind_gp_kthread();
2105 	for (;;) {
2106 
2107 		/* Handle grace-period start. */
2108 		for (;;) {
2109 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2110 					       TPS("reqwait"));
2111 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2112 			swait_event_idle_exclusive(rcu_state.gp_wq,
2113 					 READ_ONCE(rcu_state.gp_flags) &
2114 					 RCU_GP_FLAG_INIT);
2115 			rcu_gp_torture_wait();
2116 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2117 			/* Locking provides needed memory barrier. */
2118 			if (rcu_gp_init())
2119 				break;
2120 			cond_resched_tasks_rcu_qs();
2121 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2122 			WARN_ON(signal_pending(current));
2123 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2124 					       TPS("reqwaitsig"));
2125 		}
2126 
2127 		/* Handle quiescent-state forcing. */
2128 		rcu_gp_fqs_loop();
2129 
2130 		/* Handle grace-period end. */
2131 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2132 		rcu_gp_cleanup();
2133 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2134 	}
2135 }
2136 
2137 /*
2138  * Report a full set of quiescent states to the rcu_state data structure.
2139  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2140  * another grace period is required.  Whether we wake the grace-period
2141  * kthread or it awakens itself for the next round of quiescent-state
2142  * forcing, that kthread will clean up after the just-completed grace
2143  * period.  Note that the caller must hold rnp->lock, which is released
2144  * before return.
2145  */
2146 static void rcu_report_qs_rsp(unsigned long flags)
2147 	__releases(rcu_get_root()->lock)
2148 {
2149 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2150 	WARN_ON_ONCE(!rcu_gp_in_progress());
2151 	WRITE_ONCE(rcu_state.gp_flags,
2152 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2153 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2154 	rcu_gp_kthread_wake();
2155 }
2156 
2157 /*
2158  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2159  * Allows quiescent states for a group of CPUs to be reported at one go
2160  * to the specified rcu_node structure, though all the CPUs in the group
2161  * must be represented by the same rcu_node structure (which need not be a
2162  * leaf rcu_node structure, though it often will be).  The gps parameter
2163  * is the grace-period snapshot, which means that the quiescent states
2164  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2165  * must be held upon entry, and it is released before return.
2166  *
2167  * As a special case, if mask is zero, the bit-already-cleared check is
2168  * disabled.  This allows propagating quiescent state due to resumed tasks
2169  * during grace-period initialization.
2170  */
2171 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2172 			      unsigned long gps, unsigned long flags)
2173 	__releases(rnp->lock)
2174 {
2175 	unsigned long oldmask = 0;
2176 	struct rcu_node *rnp_c;
2177 
2178 	raw_lockdep_assert_held_rcu_node(rnp);
2179 
2180 	/* Walk up the rcu_node hierarchy. */
2181 	for (;;) {
2182 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2183 
2184 			/*
2185 			 * Our bit has already been cleared, or the
2186 			 * relevant grace period is already over, so done.
2187 			 */
2188 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2189 			return;
2190 		}
2191 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2192 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2193 			     rcu_preempt_blocked_readers_cgp(rnp));
2194 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2195 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2196 						 mask, rnp->qsmask, rnp->level,
2197 						 rnp->grplo, rnp->grphi,
2198 						 !!rnp->gp_tasks);
2199 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2200 
2201 			/* Other bits still set at this level, so done. */
2202 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2203 			return;
2204 		}
2205 		rnp->completedqs = rnp->gp_seq;
2206 		mask = rnp->grpmask;
2207 		if (rnp->parent == NULL) {
2208 
2209 			/* No more levels.  Exit loop holding root lock. */
2210 
2211 			break;
2212 		}
2213 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2214 		rnp_c = rnp;
2215 		rnp = rnp->parent;
2216 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2217 		oldmask = READ_ONCE(rnp_c->qsmask);
2218 	}
2219 
2220 	/*
2221 	 * Get here if we are the last CPU to pass through a quiescent
2222 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2223 	 * to clean up and start the next grace period if one is needed.
2224 	 */
2225 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2226 }
2227 
2228 /*
2229  * Record a quiescent state for all tasks that were previously queued
2230  * on the specified rcu_node structure and that were blocking the current
2231  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2232  * irqs disabled, and this lock is released upon return, but irqs remain
2233  * disabled.
2234  */
2235 static void __maybe_unused
2236 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2237 	__releases(rnp->lock)
2238 {
2239 	unsigned long gps;
2240 	unsigned long mask;
2241 	struct rcu_node *rnp_p;
2242 
2243 	raw_lockdep_assert_held_rcu_node(rnp);
2244 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2245 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2246 	    rnp->qsmask != 0) {
2247 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2248 		return;  /* Still need more quiescent states! */
2249 	}
2250 
2251 	rnp->completedqs = rnp->gp_seq;
2252 	rnp_p = rnp->parent;
2253 	if (rnp_p == NULL) {
2254 		/*
2255 		 * Only one rcu_node structure in the tree, so don't
2256 		 * try to report up to its nonexistent parent!
2257 		 */
2258 		rcu_report_qs_rsp(flags);
2259 		return;
2260 	}
2261 
2262 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2263 	gps = rnp->gp_seq;
2264 	mask = rnp->grpmask;
2265 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2266 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2267 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2268 }
2269 
2270 /*
2271  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2272  * structure.  This must be called from the specified CPU.
2273  */
2274 static void
2275 rcu_report_qs_rdp(struct rcu_data *rdp)
2276 {
2277 	unsigned long flags;
2278 	unsigned long mask;
2279 	bool needwake = false;
2280 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
2281 	struct rcu_node *rnp;
2282 
2283 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2284 	rnp = rdp->mynode;
2285 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2286 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2287 	    rdp->gpwrap) {
2288 
2289 		/*
2290 		 * The grace period in which this quiescent state was
2291 		 * recorded has ended, so don't report it upwards.
2292 		 * We will instead need a new quiescent state that lies
2293 		 * within the current grace period.
2294 		 */
2295 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2296 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2297 		return;
2298 	}
2299 	mask = rdp->grpmask;
2300 	rdp->core_needs_qs = false;
2301 	if ((rnp->qsmask & mask) == 0) {
2302 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2303 	} else {
2304 		/*
2305 		 * This GP can't end until this CPU checks in, so all of our
2306 		 * callbacks can be processed during the next GP.
2307 		 */
2308 		if (!offloaded)
2309 			needwake = rcu_accelerate_cbs(rnp, rdp);
2310 
2311 		rcu_disable_urgency_upon_qs(rdp);
2312 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2313 		/* ^^^ Released rnp->lock */
2314 		if (needwake)
2315 			rcu_gp_kthread_wake();
2316 	}
2317 }
2318 
2319 /*
2320  * Check to see if there is a new grace period of which this CPU
2321  * is not yet aware, and if so, set up local rcu_data state for it.
2322  * Otherwise, see if this CPU has just passed through its first
2323  * quiescent state for this grace period, and record that fact if so.
2324  */
2325 static void
2326 rcu_check_quiescent_state(struct rcu_data *rdp)
2327 {
2328 	/* Check for grace-period ends and beginnings. */
2329 	note_gp_changes(rdp);
2330 
2331 	/*
2332 	 * Does this CPU still need to do its part for current grace period?
2333 	 * If no, return and let the other CPUs do their part as well.
2334 	 */
2335 	if (!rdp->core_needs_qs)
2336 		return;
2337 
2338 	/*
2339 	 * Was there a quiescent state since the beginning of the grace
2340 	 * period? If no, then exit and wait for the next call.
2341 	 */
2342 	if (rdp->cpu_no_qs.b.norm)
2343 		return;
2344 
2345 	/*
2346 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2347 	 * judge of that).
2348 	 */
2349 	rcu_report_qs_rdp(rdp);
2350 }
2351 
2352 /*
2353  * Near the end of the offline process.  Trace the fact that this CPU
2354  * is going offline.
2355  */
2356 int rcutree_dying_cpu(unsigned int cpu)
2357 {
2358 	bool blkd;
2359 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2360 	struct rcu_node *rnp = rdp->mynode;
2361 
2362 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2363 		return 0;
2364 
2365 	blkd = !!(rnp->qsmask & rdp->grpmask);
2366 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2367 			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2368 	return 0;
2369 }
2370 
2371 /*
2372  * All CPUs for the specified rcu_node structure have gone offline,
2373  * and all tasks that were preempted within an RCU read-side critical
2374  * section while running on one of those CPUs have since exited their RCU
2375  * read-side critical section.  Some other CPU is reporting this fact with
2376  * the specified rcu_node structure's ->lock held and interrupts disabled.
2377  * This function therefore goes up the tree of rcu_node structures,
2378  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2379  * the leaf rcu_node structure's ->qsmaskinit field has already been
2380  * updated.
2381  *
2382  * This function does check that the specified rcu_node structure has
2383  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2384  * prematurely.  That said, invoking it after the fact will cost you
2385  * a needless lock acquisition.  So once it has done its work, don't
2386  * invoke it again.
2387  */
2388 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2389 {
2390 	long mask;
2391 	struct rcu_node *rnp = rnp_leaf;
2392 
2393 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2394 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2395 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2396 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2397 		return;
2398 	for (;;) {
2399 		mask = rnp->grpmask;
2400 		rnp = rnp->parent;
2401 		if (!rnp)
2402 			break;
2403 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2404 		rnp->qsmaskinit &= ~mask;
2405 		/* Between grace periods, so better already be zero! */
2406 		WARN_ON_ONCE(rnp->qsmask);
2407 		if (rnp->qsmaskinit) {
2408 			raw_spin_unlock_rcu_node(rnp);
2409 			/* irqs remain disabled. */
2410 			return;
2411 		}
2412 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2413 	}
2414 }
2415 
2416 /*
2417  * The CPU has been completely removed, and some other CPU is reporting
2418  * this fact from process context.  Do the remainder of the cleanup.
2419  * There can only be one CPU hotplug operation at a time, so no need for
2420  * explicit locking.
2421  */
2422 int rcutree_dead_cpu(unsigned int cpu)
2423 {
2424 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2425 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2426 
2427 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2428 		return 0;
2429 
2430 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2431 	/* Adjust any no-longer-needed kthreads. */
2432 	rcu_boost_kthread_setaffinity(rnp, -1);
2433 	// Stop-machine done, so allow nohz_full to disable tick.
2434 	tick_dep_clear(TICK_DEP_BIT_RCU);
2435 	return 0;
2436 }
2437 
2438 /*
2439  * Invoke any RCU callbacks that have made it to the end of their grace
2440  * period.  Throttle as specified by rdp->blimit.
2441  */
2442 static void rcu_do_batch(struct rcu_data *rdp)
2443 {
2444 	int div;
2445 	bool __maybe_unused empty;
2446 	unsigned long flags;
2447 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
2448 	struct rcu_head *rhp;
2449 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2450 	long bl, count = 0;
2451 	long pending, tlimit = 0;
2452 
2453 	/* If no callbacks are ready, just return. */
2454 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2455 		trace_rcu_batch_start(rcu_state.name,
2456 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2457 		trace_rcu_batch_end(rcu_state.name, 0,
2458 				    !rcu_segcblist_empty(&rdp->cblist),
2459 				    need_resched(), is_idle_task(current),
2460 				    rcu_is_callbacks_kthread());
2461 		return;
2462 	}
2463 
2464 	/*
2465 	 * Extract the list of ready callbacks, disabling interrupts to prevent
2466 	 * races with call_rcu() from interrupt handlers.  Leave the
2467 	 * callback counts, as rcu_barrier() needs to be conservative.
2468 	 */
2469 	local_irq_save(flags);
2470 	rcu_nocb_lock(rdp);
2471 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2472 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2473 	div = READ_ONCE(rcu_divisor);
2474 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2475 	bl = max(rdp->blimit, pending >> div);
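	/*
	 * Example (illustrative numbers, assuming the default rcu_divisor
	 * of 7 and default blimit of 10): with 25600 pending callbacks,
	 * bl = max(10, 25600 >> 7) = 200, so the batch limit scales with
	 * the backlog rather than staying pinned at rdp->blimit.
	 */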
2476 	if (unlikely(bl > 100)) {
2477 		long rrn = READ_ONCE(rcu_resched_ns);
2478 
2479 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2480 		tlimit = local_clock() + rrn;
2481 	}
2482 	trace_rcu_batch_start(rcu_state.name,
2483 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2484 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2485 	if (offloaded)
2486 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2487 
2488 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2489 	rcu_nocb_unlock_irqrestore(rdp, flags);
2490 
2491 	/* Invoke callbacks. */
2492 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2493 	rhp = rcu_cblist_dequeue(&rcl);
2494 
2495 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2496 		rcu_callback_t f;
2497 
2498 		count++;
2499 		debug_rcu_head_unqueue(rhp);
2500 
2501 		rcu_lock_acquire(&rcu_callback_map);
2502 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2503 
2504 		f = rhp->func;
2505 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2506 		f(rhp);
2507 
2508 		rcu_lock_release(&rcu_callback_map);
2509 
2510 		/*
2511 		 * Stop only if limit reached and CPU has something to do.
2512 		 */
2513 		if (count >= bl && !offloaded &&
2514 		    (need_resched() ||
2515 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2516 			break;
2517 		if (unlikely(tlimit)) {
2518 			/* only call local_clock() every 32 callbacks */
2519 			if (likely((count & 31) || local_clock() < tlimit))
2520 				continue;
2521 			/* Exceeded the time limit, so leave. */
2522 			break;
2523 		}
2524 		if (!in_serving_softirq()) {
2525 			local_bh_enable();
2526 			lockdep_assert_irqs_enabled();
2527 			cond_resched_tasks_rcu_qs();
2528 			lockdep_assert_irqs_enabled();
2529 			local_bh_disable();
2530 		}
2531 	}
2532 
2533 	local_irq_save(flags);
2534 	rcu_nocb_lock(rdp);
2535 	rdp->n_cbs_invoked += count;
2536 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2537 			    is_idle_task(current), rcu_is_callbacks_kthread());
2538 
2539 	/* Update counts and requeue any remaining callbacks. */
2540 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2541 	rcu_segcblist_add_len(&rdp->cblist, -count);
2542 
2543 	/* Reinstate batch limit if we have worked down the excess. */
2544 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2545 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2546 		rdp->blimit = blimit;
2547 
2548 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2549 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2550 		rdp->qlen_last_fqs_check = 0;
2551 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2552 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2553 		rdp->qlen_last_fqs_check = count;
2554 
2555 	/*
2556 	 * The following usually indicates a double call_rcu().  To track
2557 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2558 	 */
2559 	empty = rcu_segcblist_empty(&rdp->cblist);
2560 	WARN_ON_ONCE(count == 0 && !empty);
2561 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2562 		     count != 0 && empty);
2563 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2564 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2565 
2566 	rcu_nocb_unlock_irqrestore(rdp, flags);
2567 
2568 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2569 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2570 		invoke_rcu_core();
2571 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2572 }
2573 
2574 /*
2575  * This function is invoked from each scheduling-clock interrupt,
2576  * and checks to see if this CPU is in a non-context-switch quiescent
2577  * state, for example, user mode or idle loop.  It also schedules RCU
2578  * core processing.  If the current grace period has gone on too long,
2579  * it will ask the scheduler to manufacture a context switch for the sole
2580  * purpose of providing the needed quiescent state.
2581  */
2582 void rcu_sched_clock_irq(int user)
2583 {
2584 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2585 	lockdep_assert_irqs_disabled();
2586 	raw_cpu_inc(rcu_data.ticks_this_gp);
2587 	/* The load-acquire pairs with the store-release setting to true. */
2588 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2589 		/* Idle and userspace execution already are quiescent states. */
2590 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2591 			set_tsk_need_resched(current);
2592 			set_preempt_need_resched();
2593 		}
2594 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2595 	}
2596 	rcu_flavor_sched_clock_irq(user);
2597 	if (rcu_pending(user))
2598 		invoke_rcu_core();
2599 	lockdep_assert_irqs_disabled();
2600 
2601 	trace_rcu_utilization(TPS("End scheduler-tick"));
2602 }
2603 
2604 /*
2605  * Scan the leaf rcu_node structures.  For each structure on which all
2606  * CPUs have reported a quiescent state and on which there are tasks
2607  * blocking the current grace period, initiate RCU priority boosting.
2608  * Otherwise, invoke the specified function to check dyntick state for
2609  * each CPU that has not yet reported a quiescent state.
2610  */
2611 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2612 {
2613 	int cpu;
2614 	unsigned long flags;
2615 	unsigned long mask;
2616 	struct rcu_data *rdp;
2617 	struct rcu_node *rnp;
2618 
2619 	rcu_state.cbovld = rcu_state.cbovldnext;
2620 	rcu_state.cbovldnext = false;
2621 	rcu_for_each_leaf_node(rnp) {
2622 		cond_resched_tasks_rcu_qs();
2623 		mask = 0;
2624 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2625 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2626 		if (rnp->qsmask == 0) {
2627 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2628 				/*
2629 				 * No point in scanning bits because they
2630 				 * are all zero.  But we might need to
2631 				 * priority-boost blocked readers.
2632 				 */
2633 				rcu_initiate_boost(rnp, flags);
2634 				/* rcu_initiate_boost() releases rnp->lock */
2635 				continue;
2636 			}
2637 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2638 			continue;
2639 		}
2640 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2641 			rdp = per_cpu_ptr(&rcu_data, cpu);
2642 			if (f(rdp)) {
2643 				mask |= rdp->grpmask;
2644 				rcu_disable_urgency_upon_qs(rdp);
2645 			}
2646 		}
2647 		if (mask != 0) {
2648 			/* Idle/offline CPUs, report (releases rnp->lock). */
2649 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2650 		} else {
2651 			/* Nothing to do here, so just drop the lock. */
2652 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2653 		}
2654 	}
2655 }
2656 
2657 /*
2658  * Force quiescent states on reluctant CPUs, and also detect which
2659  * CPUs are in dyntick-idle mode.
2660  */
2661 void rcu_force_quiescent_state(void)
2662 {
2663 	unsigned long flags;
2664 	bool ret;
2665 	struct rcu_node *rnp;
2666 	struct rcu_node *rnp_old = NULL;
2667 
2668 	/* Funnel through hierarchy to reduce memory contention. */
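	/*
	 * Each CPU starts at its own leaf and trylock-walks toward the root,
	 * bailing out as soon as the FQS flag is already set or another CPU
	 * holds the next level's ->fqslock, so at most one CPU per subtree
	 * ever contends for the root rcu_node structure's lock.
	 */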
2669 	rnp = __this_cpu_read(rcu_data.mynode);
2670 	for (; rnp != NULL; rnp = rnp->parent) {
2671 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2672 		       !raw_spin_trylock(&rnp->fqslock);
2673 		if (rnp_old != NULL)
2674 			raw_spin_unlock(&rnp_old->fqslock);
2675 		if (ret)
2676 			return;
2677 		rnp_old = rnp;
2678 	}
2679 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2680 
2681 	/* Reached the root of the rcu_node tree, acquire lock. */
2682 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2683 	raw_spin_unlock(&rnp_old->fqslock);
2684 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2685 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2686 		return;  /* Someone beat us to it. */
2687 	}
2688 	WRITE_ONCE(rcu_state.gp_flags,
2689 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2690 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2691 	rcu_gp_kthread_wake();
2692 }
2693 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2694 
2695 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2696 // grace periods.
2697 static void strict_work_handler(struct work_struct *work)
2698 {
2699 	rcu_read_lock();
2700 	rcu_read_unlock();
2701 }
2702 
2703 /* Perform RCU core processing work for the current CPU.  */
2704 static __latent_entropy void rcu_core(void)
2705 {
2706 	unsigned long flags;
2707 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2708 	struct rcu_node *rnp = rdp->mynode;
2709 	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2710 
2711 	if (cpu_is_offline(smp_processor_id()))
2712 		return;
2713 	trace_rcu_utilization(TPS("Start RCU core"));
2714 	WARN_ON_ONCE(!rdp->beenonline);
2715 
2716 	/* Report any deferred quiescent states if preemption enabled. */
2717 	if (!(preempt_count() & PREEMPT_MASK)) {
2718 		rcu_preempt_deferred_qs(current);
2719 	} else if (rcu_preempt_need_deferred_qs(current)) {
2720 		set_tsk_need_resched(current);
2721 		set_preempt_need_resched();
2722 	}
2723 
2724 	/* Update RCU state based on any recent quiescent states. */
2725 	rcu_check_quiescent_state(rdp);
2726 
2727 	/* No grace period and unregistered callbacks? */
2728 	if (!rcu_gp_in_progress() &&
2729 	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2730 		rcu_nocb_lock_irqsave(rdp, flags);
2731 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2732 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2733 		rcu_nocb_unlock_irqrestore(rdp, flags);
2734 	}
2735 
2736 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2737 
2738 	/* If there are callbacks ready, invoke them. */
2739 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2740 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
2741 		rcu_do_batch(rdp);
2742 
2743 	/* Do any needed deferred wakeups of rcuo kthreads. */
2744 	do_nocb_deferred_wakeup(rdp);
2745 	trace_rcu_utilization(TPS("End RCU core"));
2746 
2747 	// If strict GPs, schedule an RCU reader in a clean environment.
2748 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2749 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2750 }
2751 
2752 static void rcu_core_si(struct softirq_action *h)
2753 {
2754 	rcu_core();
2755 }
2756 
2757 static void rcu_wake_cond(struct task_struct *t, int status)
2758 {
2759 	/*
2760 	 * If the thread is yielding, only wake it when this
2761 	 * is invoked from the idle task.
2762 	 */
2763 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2764 		wake_up_process(t);
2765 }
2766 
2767 static void invoke_rcu_core_kthread(void)
2768 {
2769 	struct task_struct *t;
2770 	unsigned long flags;
2771 
2772 	local_irq_save(flags);
2773 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2774 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2775 	if (t != NULL && t != current)
2776 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2777 	local_irq_restore(flags);
2778 }
2779 
2780 /*
2781  * Wake up this CPU's rcuc kthread to do RCU core processing.
2782  */
2783 static void invoke_rcu_core(void)
2784 {
2785 	if (!cpu_online(smp_processor_id()))
2786 		return;
2787 	if (use_softirq)
2788 		raise_softirq(RCU_SOFTIRQ);
2789 	else
2790 		invoke_rcu_core_kthread();
2791 }
2792 
2793 static void rcu_cpu_kthread_park(unsigned int cpu)
2794 {
2795 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2796 }
2797 
2798 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2799 {
2800 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2801 }
2802 
2803 /*
2804  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2805  * the RCU softirq used in configurations of RCU that do not support RCU
2806  * priority boosting.
2807  */
2808 static void rcu_cpu_kthread(unsigned int cpu)
2809 {
2810 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2811 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2812 	int spincnt;
2813 
2814 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2815 	for (spincnt = 0; spincnt < 10; spincnt++) {
2816 		local_bh_disable();
2817 		*statusp = RCU_KTHREAD_RUNNING;
2818 		local_irq_disable();
2819 		work = *workp;
2820 		*workp = 0;
2821 		local_irq_enable();
2822 		if (work)
2823 			rcu_core();
2824 		local_bh_enable();
2825 		if (*workp == 0) {
2826 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2827 			*statusp = RCU_KTHREAD_WAITING;
2828 			return;
2829 		}
2830 	}
2831 	*statusp = RCU_KTHREAD_YIELDING;
2832 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2833 	schedule_timeout_idle(2);
2834 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2835 	*statusp = RCU_KTHREAD_WAITING;
2836 }
2837 
2838 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2839 	.store			= &rcu_data.rcu_cpu_kthread_task,
2840 	.thread_should_run	= rcu_cpu_kthread_should_run,
2841 	.thread_fn		= rcu_cpu_kthread,
2842 	.thread_comm		= "rcuc/%u",
2843 	.setup			= rcu_cpu_kthread_setup,
2844 	.park			= rcu_cpu_kthread_park,
2845 };
2846 
2847 /*
2848  * Spawn per-CPU RCU core processing kthreads.
2849  */
2850 static int __init rcu_spawn_core_kthreads(void)
2851 {
2852 	int cpu;
2853 
2854 	for_each_possible_cpu(cpu)
2855 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2856 	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2857 		return 0;
2858 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2859 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2860 	return 0;
2861 }
2862 
2863 /*
2864  * Handle any core-RCU processing required by a call_rcu() invocation.
2865  */
2866 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2867 			    unsigned long flags)
2868 {
2869 	/*
2870 	 * If called from an extended quiescent state, invoke the RCU
2871 	 * core in order to force a re-evaluation of RCU's idleness.
2872 	 */
2873 	if (!rcu_is_watching())
2874 		invoke_rcu_core();
2875 
2876 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2877 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2878 		return;
2879 
2880 	/*
2881 	 * Force the grace period if too many callbacks or too long waiting.
2882 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2883 	 * if some other CPU has recently done so.  Also, don't bother
2884 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2885 	 * is the only one waiting for a grace period to complete.
2886 	 */
2887 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2888 		     rdp->qlen_last_fqs_check + qhimark)) {
2889 
2890 		/* Are we ignoring a completed grace period? */
2891 		note_gp_changes(rdp);
2892 
2893 		/* Start a new grace period if one not already started. */
2894 		if (!rcu_gp_in_progress()) {
2895 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2896 		} else {
2897 			/* Give the grace period a kick. */
2898 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2899 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2900 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2901 				rcu_force_quiescent_state();
2902 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2903 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2904 		}
2905 	}
2906 }
2907 
2908 /*
2909  * RCU callback function to leak a callback.
2910  */
2911 static void rcu_leak_callback(struct rcu_head *rhp)
2912 {
2913 }
2914 
2915 /*
2916  * Check and if necessary update the leaf rcu_node structure's
2917  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2918  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2919  * structure's ->lock.
2920  */
2921 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2922 {
2923 	raw_lockdep_assert_held_rcu_node(rnp);
2924 	if (qovld_calc <= 0)
2925 		return; // Early boot and wildcard value set.
2926 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2927 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2928 	else
2929 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2930 }
2931 
2932 /*
2933  * Check and if necessary update the leaf rcu_node structure's
2934  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2935  * number of queued RCU callbacks.  No locks need be held, but the
2936  * caller must have disabled interrupts.
2937  *
2938  * Note that this function ignores the possibility that there are a lot
2939  * of callbacks all of which have already seen the end of their respective
2940  * grace periods.  This omission is due to the need for no-CBs CPUs to
2941  * be holding ->nocb_lock to do this check, which is too heavy for a
2942  * common-case operation.
2943  */
2944 static void check_cb_ovld(struct rcu_data *rdp)
2945 {
2946 	struct rcu_node *const rnp = rdp->mynode;
2947 
2948 	if (qovld_calc <= 0 ||
2949 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2950 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2951 		return; // Early boot wildcard value or already set correctly.
2952 	raw_spin_lock_rcu_node(rnp);
2953 	check_cb_ovld_locked(rdp, rnp);
2954 	raw_spin_unlock_rcu_node(rnp);
2955 }
2956 
2957 /* Helper function for call_rcu() and friends.  */
2958 static void
2959 __call_rcu(struct rcu_head *head, rcu_callback_t func)
2960 {
2961 	static atomic_t doublefrees;
2962 	unsigned long flags;
2963 	struct rcu_data *rdp;
2964 	bool was_alldone;
2965 
2966 	/* Misaligned rcu_head! */
2967 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2968 
2969 	if (debug_rcu_head_queue(head)) {
2970 		/*
2971 		 * Probable double call_rcu(), so leak the callback.
2972 		 * Use rcu:rcu_callback trace event to find the previous
2973 		 * time callback was passed to __call_rcu().
2974 		 */
2975 		if (atomic_inc_return(&doublefrees) < 4) {
2976 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
2977 			mem_dump_obj(head);
2978 		}
2979 		WRITE_ONCE(head->func, rcu_leak_callback);
2980 		return;
2981 	}
2982 	head->func = func;
2983 	head->next = NULL;
2984 	local_irq_save(flags);
2985 	kasan_record_aux_stack(head);
2986 	rdp = this_cpu_ptr(&rcu_data);
2987 
2988 	/* Add the callback to our list. */
2989 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2990 		// This can trigger due to call_rcu() from offline CPU:
2991 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2992 		WARN_ON_ONCE(!rcu_is_watching());
2993 		// Very early boot, before rcu_init().  Initialize if needed
2994 		// and then drop through to queue the callback.
2995 		if (rcu_segcblist_empty(&rdp->cblist))
2996 			rcu_segcblist_init(&rdp->cblist);
2997 	}
2998 
2999 	check_cb_ovld(rdp);
3000 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
3001 		return; // Enqueued onto ->nocb_bypass, so just leave.
3002 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
3003 	rcu_segcblist_enqueue(&rdp->cblist, head);
3004 	if (__is_kvfree_rcu_offset((unsigned long)func))
3005 		trace_rcu_kvfree_callback(rcu_state.name, head,
3006 					 (unsigned long)func,
3007 					 rcu_segcblist_n_cbs(&rdp->cblist));
3008 	else
3009 		trace_rcu_callback(rcu_state.name, head,
3010 				   rcu_segcblist_n_cbs(&rdp->cblist));
3011 
3012 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
3013 
3014 	/* Go handle any RCU core processing required. */
3015 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
3016 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3017 	} else {
3018 		__call_rcu_core(rdp, head, flags);
3019 		local_irq_restore(flags);
3020 	}
3021 }
3022 
3023 /**
3024  * call_rcu() - Queue an RCU callback for invocation after a grace period.
3025  * @head: structure to be used for queueing the RCU updates.
3026  * @func: actual callback function to be invoked after the grace period
3027  *
3028  * The callback function will be invoked some time after a full grace
3029  * period elapses, in other words after all pre-existing RCU read-side
3030  * critical sections have completed.  However, the callback function
3031  * might well execute concurrently with RCU read-side critical sections
3032  * that started after call_rcu() was invoked.
3033  *
3034  * RCU read-side critical sections are delimited by rcu_read_lock()
3035  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3036  * v5.0 and later, regions of code across which interrupts, preemption,
3037  * or softirqs have been disabled also serve as RCU read-side critical
3038  * sections.  This includes hardware interrupt handlers, softirq handlers,
3039  * and NMI handlers.
3040  *
3041  * Note that all CPUs must agree that the grace period extended beyond
3042  * all pre-existing RCU read-side critical sections.  On systems with more
3043  * than one CPU, this means that when "func()" is invoked, each CPU is
3044  * guaranteed to have executed a full memory barrier since the end of its
3045  * last RCU read-side critical section whose beginning preceded the call
3046  * to call_rcu().  It also means that each CPU executing an RCU read-side
3047  * critical section that continues beyond the start of "func()" must have
3048  * executed a memory barrier after the call_rcu() but before the beginning
3049  * of that RCU read-side critical section.  Note that these guarantees
3050  * include CPUs that are offline, idle, or executing in user mode, as
3051  * well as CPUs that are executing in the kernel.
3052  *
3053  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3054  * resulting RCU callback function "func()", then both CPU A and CPU B are
3055  * guaranteed to execute a full memory barrier during the time interval
3056  * between the call to call_rcu() and the invocation of "func()" -- even
3057  * if CPU A and CPU B are the same CPU (but again only if the system has
3058  * more than one CPU).
3059  *
3060  * Implementation of these memory-ordering guarantees is described here:
3061  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3062  */
3063 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3064 {
3065 	__call_rcu(head, func);
3066 }
3067 EXPORT_SYMBOL_GPL(call_rcu);
3068 
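/*
 * Illustration (hypothetical code, not part of this file): the canonical
 * call_rcu() usage pattern.  The "struct foo" type, the "release_foo"
 * name, and the update-side publication step are assumptions made for
 * this sketch; the pattern itself (embed an rcu_head, recover the
 * enclosing object with container_of() in the callback) is the standard
 * one.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void release_foo(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);  // Safe: all pre-existing readers are done.
 *	}
 *
 *	// Updater: unpublish the old object, then defer its freeing.
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	call_rcu(&old_fp->rcu, release_foo);
 */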
3069 
3070 /* Maximum number of jiffies to wait before draining a batch. */
3071 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3072 #define KFREE_N_BATCHES 2
3073 #define FREE_N_CHANNELS 2
3074 
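/*
 * Overview of the batching machinery below: each CPU keeps three "free
 * channels".  Channel 1 (bkvhead[0]) holds page-sized arrays of
 * kmalloc()ed pointers freed via kfree_bulk(), channel 2 (bkvhead[1])
 * holds arrays of vmalloc()ed pointers freed via vfree(), and channel 3
 * is a plain rcu_head list used only when no array page is available.
 * Batches drain every KFREE_DRAIN_JIFFIES, that is, HZ/50 jiffies, or
 * roughly 20 milliseconds of wall time.
 */
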
3075 /**
3076  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3077  * @nr_records: Number of active pointers in the array
3078  * @next: Next bulk object in the block chain
3079  * @records: Array of the kvfree_rcu() pointers
3080  */
3081 struct kvfree_rcu_bulk_data {
3082 	unsigned long nr_records;
3083 	struct kvfree_rcu_bulk_data *next;
3084 	void *records[];
3085 };
3086 
3087 /*
3088  * This macro defines how many entries the "records" array
3089  * will contain.  It is sized so that the kvfree_rcu_bulk_data
3090  * structure occupies exactly one page.
3091  */
3092 #define KVFREE_BULK_MAX_ENTR \
3093 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
3094 
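/*
 * Worked example (assuming a 64-bit kernel with 4096-byte pages):
 * sizeof(struct kvfree_rcu_bulk_data) is 16 bytes (one unsigned long
 * plus one pointer), so KVFREE_BULK_MAX_ENTR works out to
 * (4096 - 16) / 8 = 510 pointers per page-sized block.
 */
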
3095 /**
3096  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3097  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3098  * @head_free: List of kfree_rcu() objects waiting for a grace period
3099  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3100  * @krcp: Pointer to @kfree_rcu_cpu structure
3101  */
3102 
3103 struct kfree_rcu_cpu_work {
3104 	struct rcu_work rcu_work;
3105 	struct rcu_head *head_free;
3106 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3107 	struct kfree_rcu_cpu *krcp;
3108 };
3109 
3110 /**
3111  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3112  * @head: List of kfree_rcu() objects not yet waiting for a grace period
3113  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3114  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3115  * @lock: Synchronize access to this structure
3116  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3117  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3118  * @initialized: The @rcu_work fields have been initialized
3119  * @count: Number of objects for which GP not started
3120  * @bkvcache:
3121  *	A simple cache list that contains objects for reuse.  To
3122  *	save some per-cpu space the list is singly linked.  Even
3123  *	though the llist itself is lockless, access to it must be
3124  *	protected by the per-cpu lock.
3125  * @page_cache_work: A work item to refill the cache when it is empty
3126  * @backoff_page_cache_fill: Delay cache refills
3127  * @work_in_progress: Indicates that @page_cache_work is running
3128  * @hrtimer: An hrtimer for scheduling @page_cache_work
3129  * @nr_bkv_objs: Number of allocated objects in @bkvcache.
3130  *
3131  * This is a per-CPU structure.  The reason that it is not included in
3132  * the rcu_data structure is to permit this code to be extracted from
3133  * the RCU files.  Such extraction could allow further optimization of
3134  * the interactions with the slab allocators.
3135  */
3136 struct kfree_rcu_cpu {
3137 	struct rcu_head *head;
3138 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3139 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3140 	raw_spinlock_t lock;
3141 	struct delayed_work monitor_work;
3142 	bool monitor_todo;
3143 	bool initialized;
3144 	int count;
3145 
3146 	struct delayed_work page_cache_work;
3147 	atomic_t backoff_page_cache_fill;
3148 	atomic_t work_in_progress;
3149 	struct hrtimer hrtimer;
3150 
3151 	struct llist_head bkvcache;
3152 	int nr_bkv_objs;
3153 };
3154 
3155 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3156 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3157 };
3158 
3159 static __always_inline void
3160 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3161 {
3162 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3163 	int i;
3164 
3165 	for (i = 0; i < bhead->nr_records; i++)
3166 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3167 #endif
3168 }
3169 
3170 static inline struct kfree_rcu_cpu *
3171 krc_this_cpu_lock(unsigned long *flags)
3172 {
3173 	struct kfree_rcu_cpu *krcp;
3174 
3175 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3176 	krcp = this_cpu_ptr(&krc);
3177 	raw_spin_lock(&krcp->lock);
3178 
3179 	return krcp;
3180 }
3181 
3182 static inline void
3183 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3184 {
3185 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3186 }
3187 
3188 static inline struct kvfree_rcu_bulk_data *
3189 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3190 {
3191 	if (!krcp->nr_bkv_objs)
3192 		return NULL;
3193 
3194 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
3195 	return (struct kvfree_rcu_bulk_data *)
3196 		llist_del_first(&krcp->bkvcache);
3197 }
3198 
3199 static inline bool
3200 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3201 	struct kvfree_rcu_bulk_data *bnode)
3202 {
3203 	// Check the limit.
3204 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3205 		return false;
3206 
3207 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3208 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
3209 	return true;
3210 }
3211 
3212 static int
3213 drain_page_cache(struct kfree_rcu_cpu *krcp)
3214 {
3215 	unsigned long flags;
3216 	struct llist_node *page_list, *pos, *n;
3217 	int freed = 0;
3218 
3219 	raw_spin_lock_irqsave(&krcp->lock, flags);
3220 	page_list = llist_del_all(&krcp->bkvcache);
3221 	WRITE_ONCE(krcp->nr_bkv_objs, 0);
3222 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3223 
3224 	llist_for_each_safe(pos, n, page_list) {
3225 		free_page((unsigned long)pos);
3226 		freed++;
3227 	}
3228 
3229 	return freed;
3230 }
3231 
3232 /*
3233  * This function is invoked in workqueue context after a grace period.
3234  * It frees all the objects queued on ->bkvhead_free or ->head_free.
3235  */
3236 static void kfree_rcu_work(struct work_struct *work)
3237 {
3238 	unsigned long flags;
3239 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3240 	struct rcu_head *head, *next;
3241 	struct kfree_rcu_cpu *krcp;
3242 	struct kfree_rcu_cpu_work *krwp;
3243 	int i, j;
3244 
3245 	krwp = container_of(to_rcu_work(work),
3246 			    struct kfree_rcu_cpu_work, rcu_work);
3247 	krcp = krwp->krcp;
3248 
3249 	raw_spin_lock_irqsave(&krcp->lock, flags);
3250 	// Channels 1 and 2.
3251 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3252 		bkvhead[i] = krwp->bkvhead_free[i];
3253 		krwp->bkvhead_free[i] = NULL;
3254 	}
3255 
3256 	// Channel 3.
3257 	head = krwp->head_free;
3258 	krwp->head_free = NULL;
3259 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3260 
3261 	// Handle the first two channels.
3262 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3263 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3264 			bnext = bkvhead[i]->next;
3265 			debug_rcu_bhead_unqueue(bkvhead[i]);
3266 
3267 			rcu_lock_acquire(&rcu_callback_map);
3268 			if (i == 0) { // kmalloc() / kfree().
3269 				trace_rcu_invoke_kfree_bulk_callback(
3270 					rcu_state.name, bkvhead[i]->nr_records,
3271 					bkvhead[i]->records);
3272 
3273 				kfree_bulk(bkvhead[i]->nr_records,
3274 					bkvhead[i]->records);
3275 			} else { // vmalloc() / vfree().
3276 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3277 					trace_rcu_invoke_kvfree_callback(
3278 						rcu_state.name,
3279 						bkvhead[i]->records[j], 0);
3280 
3281 					vfree(bkvhead[i]->records[j]);
3282 				}
3283 			}
3284 			rcu_lock_release(&rcu_callback_map);
3285 
3286 			raw_spin_lock_irqsave(&krcp->lock, flags);
3287 			if (put_cached_bnode(krcp, bkvhead[i]))
3288 				bkvhead[i] = NULL;
3289 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3290 
3291 			if (bkvhead[i])
3292 				free_page((unsigned long) bkvhead[i]);
3293 
3294 			cond_resched_tasks_rcu_qs();
3295 		}
3296 	}
3297 
3298 	/*
3299 	 * This is used when the "bulk" path cannot be used for the
3300 	 * double-argument variant of kvfree_rcu().  This happens when the
3301 	 * page-cache is empty, which means that objects are instead
3302 	 * queued on a linked list through their rcu_head structures.
3303 	 * This list is named "Channel 3".
3304 	 */
3305 	for (; head; head = next) {
3306 		unsigned long offset = (unsigned long)head->func;
3307 		void *ptr = (void *)head - offset;
3308 
3309 		next = head->next;
3310 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3311 		rcu_lock_acquire(&rcu_callback_map);
3312 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3313 
3314 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3315 			kvfree(ptr);
3316 
3317 		rcu_lock_release(&rcu_callback_map);
3318 		cond_resched_tasks_rcu_qs();
3319 	}
3320 }
3321 
3322 /*
3323  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3324  */
3325 static void kfree_rcu_monitor(struct work_struct *work)
3326 {
3327 	struct kfree_rcu_cpu *krcp = container_of(work,
3328 		struct kfree_rcu_cpu, monitor_work.work);
3329 	unsigned long flags;
3330 	int i, j;
3331 
3332 	raw_spin_lock_irqsave(&krcp->lock, flags);
3333 
3334 	// Attempt to start a new batch.
3335 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3336 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3337 
3338 		// Try to detach bkvhead or head and attach it to the
3339 		// corresponding free channel, where one is available.
3340 		// A previous RCU batch might still be in progress, in
3341 		// which case another batch cannot be queued immediately,
3342 		// so the monitor work is rearmed instead.
3343 		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3344 		    (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3345 		    (krcp->head && !krwp->head_free)) {
3346 			// Channel 1 corresponds to the SLAB-pointer bulk path.
3347 			// Channel 2 corresponds to vmalloc-pointer bulk path.
3348 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3349 				if (!krwp->bkvhead_free[j]) {
3350 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3351 					krcp->bkvhead[j] = NULL;
3352 				}
3353 			}
3354 
3355 			// Channel 3 corresponds to both SLAB and vmalloc
3356 			// objects queued on the linked list.
3357 			if (!krwp->head_free) {
3358 				krwp->head_free = krcp->head;
3359 				krcp->head = NULL;
3360 			}
3361 
3362 			WRITE_ONCE(krcp->count, 0);
3363 
3364 			// There is one work item per batch, and each
3365 			// batch can handle all three "free channels".
3366 			// The work might already be in the pending state
3367 			// if the channels were detached one after the
3368 			// other.
3369 			queue_rcu_work(system_wq, &krwp->rcu_work);
3370 		}
3371 	}
3372 
3373 	// If there is nothing to detach, our job is done: every
3374 	// channel has been handed off.  But if at least one channel
3375 	// is still busy because a previous batch is still in
3376 	// progress, rearm the work so that another attempt is made
3377 	// after the next KFREE_DRAIN_JIFFIES interval.
3378 	if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
3379 		krcp->monitor_todo = false;
3380 	else
3381 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3382 
3383 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3384 }
3385 
3386 static enum hrtimer_restart
3387 schedule_page_work_fn(struct hrtimer *t)
3388 {
3389 	struct kfree_rcu_cpu *krcp =
3390 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3391 
3392 	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3393 	return HRTIMER_NORESTART;
3394 }
3395 
3396 static void fill_page_cache_func(struct work_struct *work)
3397 {
3398 	struct kvfree_rcu_bulk_data *bnode;
3399 	struct kfree_rcu_cpu *krcp =
3400 		container_of(work, struct kfree_rcu_cpu,
3401 			page_cache_work.work);
3402 	unsigned long flags;
3403 	int nr_pages;
3404 	bool pushed;
3405 	int i;
3406 
3407 	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3408 		1 : rcu_min_cached_objs;
3409 
3410 	for (i = 0; i < nr_pages; i++) {
3411 		bnode = (struct kvfree_rcu_bulk_data *)
3412 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3413 
3414 		if (bnode) {
3415 			raw_spin_lock_irqsave(&krcp->lock, flags);
3416 			pushed = put_cached_bnode(krcp, bnode);
3417 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3418 
3419 			if (!pushed) {
3420 				free_page((unsigned long) bnode);
3421 				break;
3422 			}
3423 		}
3424 	}
3425 
3426 	atomic_set(&krcp->work_in_progress, 0);
3427 	atomic_set(&krcp->backoff_page_cache_fill, 0);
3428 }
3429 
3430 static void
3431 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3432 {
3433 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3434 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3435 		if (atomic_read(&krcp->backoff_page_cache_fill)) {
3436 			queue_delayed_work(system_wq,
3437 				&krcp->page_cache_work,
3438 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3439 		} else {
3440 			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3441 			krcp->hrtimer.function = schedule_page_work_fn;
3442 			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3443 		}
3444 	}
3445 }
3446 
3447 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3448 // state specified by flags.  If can_alloc is true, the caller must
3449 // be schedulable and not be holding any locks or mutexes that might be
3450 // acquired by the memory allocator or anything that it might invoke.
3451 // Returns true if ptr was successfully recorded, else the caller must
3452 // use a fallback.
3453 static inline bool
3454 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3455 	unsigned long *flags, void *ptr, bool can_alloc)
3456 {
3457 	struct kvfree_rcu_bulk_data *bnode;
3458 	int idx;
3459 
3460 	*krcp = krc_this_cpu_lock(flags);
3461 	if (unlikely(!(*krcp)->initialized))
3462 		return false;
3463 
3464 	idx = !!is_vmalloc_addr(ptr);
3465 
3466 	/* Check if a new block is required. */
3467 	if (!(*krcp)->bkvhead[idx] ||
3468 	    (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3469 		bnode = get_cached_bnode(*krcp);
3470 		if (!bnode && can_alloc) {
3471 			krc_this_cpu_unlock(*krcp, *flags);
3472 
3473 			// __GFP_NORETRY - permits a lightweight direct reclaim,
3474 			// which helps minimize how often the fallback path is
3475 			// hit.  It also forbids invoking the OOM killer, which
3476 			// is beneficial because memory is about to be released.
3477 			//
3478 			// __GFP_NOMEMALLOC - prevents consuming all of the
3479 			// memory reserves.  Please note we have a fallback path.
3480 			//
3481 			// __GFP_NOWARN - allocations are expected to fail under
3482 			// low-memory or high-memory-pressure scenarios, so do
3483 			// not warn when they do.
3484 			bnode = (struct kvfree_rcu_bulk_data *)
3485 				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3486 			*krcp = krc_this_cpu_lock(flags);
3487 		}
3488 
3489 		if (!bnode)
3490 			return false;
3491 
3492 		/* Initialize the new block. */
3493 		bnode->nr_records = 0;
3494 		bnode->next = (*krcp)->bkvhead[idx];
3495 
3496 		/* Attach it to the head. */
3497 		(*krcp)->bkvhead[idx] = bnode;
3498 	}
3499 
3500 	/* Finally insert. */
3501 	(*krcp)->bkvhead[idx]->records
3502 		[(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3503 
3504 	return true;
3505 }
3506 
3507 /*
3508  * Queue a request for lazy invocation of the appropriate free routine
3509  * after a grace period.  Please note that three paths are maintained,
3510  * two for the common case using arrays of pointers and a third one that
3511  * is used only when the main paths cannot be used, for example, due to
3512  * memory pressure.
3513  *
3514  * Each kvfree_call_rcu() request is added to a batch.  The batch will be
3515  * drained every KFREE_DRAIN_JIFFIES jiffies.  All the objects in the batch
3516  * will be freed in workqueue context.  Batching requests together reduces
3517  * the number of grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3518  */
3519 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3520 {
3521 	unsigned long flags;
3522 	struct kfree_rcu_cpu *krcp;
3523 	bool success;
3524 	void *ptr;
3525 
3526 	if (head) {
3527 		ptr = (void *) head - (unsigned long) func;
3528 	} else {
3529 		/*
3530 		 * Please note the headless variant has a limitation,
3531 		 * hence the clear rule for such objects: they may be
3532 		 * used only from contexts that can sleep, as checked
3533 		 * by might_sleep().  For other places, please embed
3534 		 * an rcu_head into your data.
3535 		 */
3536 		might_sleep();
3537 		ptr = (unsigned long *) func;
3538 	}
3539 
3540 	// Queue the object but don't yet schedule the batch.
3541 	if (debug_rcu_head_queue(ptr)) {
3542 		// Probable double kfree_rcu(), just leak.
3543 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3544 			  __func__, head);
3545 
3546 		// Treat as success (deliberately leak the object) and leave.
3547 		return;
3548 	}
3549 
3550 	kasan_record_aux_stack(ptr);
3551 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3552 	if (!success) {
3553 		run_page_cache_worker(krcp);
3554 
3555 		if (head == NULL)
3556 			// Inline if kvfree_rcu(one_arg) call.
3557 			goto unlock_return;
3558 
3559 		head->func = func;
3560 		head->next = krcp->head;
3561 		krcp->head = head;
3562 		success = true;
3563 	}
3564 
3565 	WRITE_ONCE(krcp->count, krcp->count + 1);
3566 
3567 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3568 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3569 	    !krcp->monitor_todo) {
3570 		krcp->monitor_todo = true;
3571 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3572 	}
3573 
3574 unlock_return:
3575 	krc_this_cpu_unlock(krcp, flags);
3576 
3577 	/*
3578 	 * Inline kvfree() after synchronize_rcu(). We can do
3579 	 * it from might_sleep() context only, so the current
3580 	 * CPU can pass through a quiescent state.
3581 	 */
3582 	if (!success) {
3583 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3584 		synchronize_rcu();
3585 		kvfree(ptr);
3586 	}
3587 }
3588 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
3589 
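/*
 * Illustration (hypothetical code, not part of this file):
 * kvfree_call_rcu() is normally reached through the kfree_rcu() and
 * kvfree_rcu() macros.  The "struct foo" type and the fp/ptr variables
 * are assumptions made for this sketch.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	// Double-argument form: usable wherever call_rcu() is usable.
 *	// The second argument names the rcu_head field within *fp.
 *	kfree_rcu(fp, rcu);
 *
 *	// Single-argument (headless) form: no rcu_head is needed, but
 *	// per the rule above it may be invoked only from contexts that
 *	// can sleep, because it can fall back to synchronize_rcu().
 *	kvfree_rcu(ptr);
 */
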
3590 static unsigned long
3591 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3592 {
3593 	int cpu;
3594 	unsigned long count = 0;
3595 
3596 	/* Snapshot count of all CPUs */
3597 	for_each_possible_cpu(cpu) {
3598 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3599 
3600 		count += READ_ONCE(krcp->count);
3601 		count += READ_ONCE(krcp->nr_bkv_objs);
3602 		atomic_set(&krcp->backoff_page_cache_fill, 1);
3603 	}
3604 
3605 	return count;
3606 }
3607 
3608 static unsigned long
3609 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3610 {
3611 	int cpu, freed = 0;
3612 
3613 	for_each_possible_cpu(cpu) {
3614 		int count;
3615 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3616 
3617 		count = krcp->count;
3618 		count += drain_page_cache(krcp);
3619 		kfree_rcu_monitor(&krcp->monitor_work.work);
3620 
3621 		sc->nr_to_scan -= count;
3622 		freed += count;
3623 
3624 		if (sc->nr_to_scan <= 0)
3625 			break;
3626 	}
3627 
3628 	return freed == 0 ? SHRINK_STOP : freed;
3629 }
3630 
3631 static struct shrinker kfree_rcu_shrinker = {
3632 	.count_objects = kfree_rcu_shrink_count,
3633 	.scan_objects = kfree_rcu_shrink_scan,
3634 	.batch = 0,
3635 	.seeks = DEFAULT_SEEKS,
3636 };
3637 
3638 void __init kfree_rcu_scheduler_running(void)
3639 {
3640 	int cpu;
3641 	unsigned long flags;
3642 
3643 	for_each_possible_cpu(cpu) {
3644 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3645 
3646 		raw_spin_lock_irqsave(&krcp->lock, flags);
3647 		if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
3648 				krcp->monitor_todo) {
3649 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3650 			continue;
3651 		}
3652 		krcp->monitor_todo = true;
3653 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
3654 					 KFREE_DRAIN_JIFFIES);
3655 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3656 	}
3657 }
3658 
3659 /*
3660  * During early boot, any blocking grace-period wait automatically
3661  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3662  *
3663  * However, because a context switch is a grace period for !PREEMPTION, any
3664  * blocking grace-period wait automatically implies a grace period if
3665  * there is only one CPU online at any point in time during execution of
3666  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3667  * occasionally incorrectly indicate that there are multiple CPUs online
3668  * when there was in fact only one the whole time, as this just adds some
3669  * overhead: RCU still operates correctly.
3670  */
3671 static int rcu_blocking_is_gp(void)
3672 {
3673 	int ret;
3674 
3675 	if (IS_ENABLED(CONFIG_PREEMPTION))
3676 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3677 	might_sleep();  /* Check for RCU read-side critical section. */
3678 	preempt_disable();
3679 	/*
3680 	 * If the rcu_state.n_online_cpus counter is equal to one,
3681 	 * there is only one CPU, and that CPU sees all prior accesses
3682 	 * made by any CPU that was online at the time of its access.
3683 	 * Furthermore, if this counter is equal to one, its value cannot
3684 	 * change until after the preempt_enable() below.
3685 	 *
3686 	 * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3687 	 * all later CPUs (both this one and any that come online later
3688 	 * on) are guaranteed to see all accesses prior to this point
3689 	 * in the code, without the need for additional memory barriers.
3690 	 * Those memory barriers are provided by CPU-hotplug code.
3691 	 */
3692 	ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3693 	preempt_enable();
3694 	return ret;
3695 }
3696 
3697 /**
3698  * synchronize_rcu - wait until a grace period has elapsed.
3699  *
3700  * Control will return to the caller some time after a full grace
3701  * period has elapsed, in other words after all currently executing RCU
3702  * read-side critical sections have completed.  Note, however, that
3703  * upon return from synchronize_rcu(), the caller might well be executing
3704  * concurrently with new RCU read-side critical sections that began while
3705  * synchronize_rcu() was waiting.
3706  *
3707  * RCU read-side critical sections are delimited by rcu_read_lock()
3708  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3709  * v5.0 and later, regions of code across which interrupts, preemption,
3710  * or softirqs have been disabled also serve as RCU read-side critical
3711  * sections.  This includes hardware interrupt handlers, softirq handlers,
3712  * and NMI handlers.
3713  *
3714  * Note that this guarantee implies further memory-ordering guarantees.
3715  * On systems with more than one CPU, when synchronize_rcu() returns,
3716  * each CPU is guaranteed to have executed a full memory barrier since
3717  * the end of its last RCU read-side critical section whose beginning
3718  * preceded the call to synchronize_rcu().  In addition, each CPU having
3719  * an RCU read-side critical section that extends beyond the return from
3720  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3721  * after the beginning of synchronize_rcu() and before the beginning of
3722  * that RCU read-side critical section.  Note that these guarantees include
3723  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3724  * that are executing in the kernel.
3725  *
3726  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3727  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3728  * to have executed a full memory barrier during the execution of
3729  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3730  * again only if the system has more than one CPU).
3731  *
3732  * Implementation of these memory-ordering guarantees is described here:
3733  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3734  */
3735 void synchronize_rcu(void)
3736 {
3737 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3738 			 lock_is_held(&rcu_lock_map) ||
3739 			 lock_is_held(&rcu_sched_lock_map),
3740 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3741 	if (rcu_blocking_is_gp())
3742 		return;  // Context allows vacuous grace periods.
3743 	if (rcu_gp_is_expedited())
3744 		synchronize_rcu_expedited();
3745 	else
3746 		wait_rcu_gp(call_rcu);
3747 }
3748 EXPORT_SYMBOL_GPL(synchronize_rcu);
3749 
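/*
 * Illustration (hypothetical code, not part of this file): the classic
 * synchronous update pattern built on synchronize_rcu().  The lock,
 * list, and variable names are assumptions made for this sketch.
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&fp->list);	// Unpublish the element.
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();		// Wait out pre-existing readers.
 *	kfree(fp);			// No reader can still hold a reference.
 */
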
3750 /**
3751  * get_state_synchronize_rcu - Snapshot current RCU state
3752  *
3753  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3754  * or poll_state_synchronize_rcu() to determine whether or not a full
3755  * grace period has elapsed in the meantime.
3756  */
3757 unsigned long get_state_synchronize_rcu(void)
3758 {
3759 	/*
3760 	 * Any prior manipulation of RCU-protected data must happen
3761 	 * before the load from ->gp_seq.
3762 	 */
3763 	smp_mb();  /* ^^^ */
3764 	return rcu_seq_snap(&rcu_state.gp_seq);
3765 }
3766 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3767 
3768 /**
3769  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3770  *
3771  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3772  * or poll_state_synchronize_rcu() to determine whether or not a full
3773  * grace period has elapsed in the meantime.  If the needed grace period
3774  * is not already slated to start, notifies RCU core of the need for that
3775  * grace period.
3776  *
3777  * Interrupts must be enabled for the case where it is necessary to awaken
3778  * the grace-period kthread.
3779  */
3780 unsigned long start_poll_synchronize_rcu(void)
3781 {
3782 	unsigned long flags;
3783 	unsigned long gp_seq = get_state_synchronize_rcu();
3784 	bool needwake;
3785 	struct rcu_data *rdp;
3786 	struct rcu_node *rnp;
3787 
3788 	lockdep_assert_irqs_enabled();
3789 	local_irq_save(flags);
3790 	rdp = this_cpu_ptr(&rcu_data);
3791 	rnp = rdp->mynode;
3792 	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3793 	needwake = rcu_start_this_gp(rnp, rdp, gp_seq);
3794 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3795 	if (needwake)
3796 		rcu_gp_kthread_wake();
3797 	return gp_seq;
3798 }
3799 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3800 
3801 /**
3802  * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
3803  *
3804  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3805  *
3806  * If a full RCU grace period has elapsed since the earlier call from
3807  * which oldstate was obtained, return @true, otherwise return @false.
3808  * If @false is returned, it is the caller's responsibility to invoke this
3809  * function later on until it does return @true.  Alternatively, the caller
3810  * can explicitly wait for a grace period, for example, by passing @oldstate
3811  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3812  *
3813  * Yes, this function does not take counter wrap into account.
3814  * But counter wrap is harmless.  If the counter wraps, we have waited for
3815  * more than 2 billion grace periods (and way more on a 64-bit system!).
3816  * Those needing to keep oldstate values for very long time periods
3817  * (many hours even on 32-bit systems) should check them occasionally
3818  * and either refresh them or set a flag indicating that the grace period
3819  * has completed.
3820  *
3821  * This function provides the same memory-ordering guarantees that
3822  * would be provided by a synchronize_rcu() that was invoked at the call
3823  * to the function that provided @oldstate, and that returned at the end
3824  * of this function.
3825  */
3826 bool poll_state_synchronize_rcu(unsigned long oldstate)
3827 {
3828 	if (rcu_seq_done(&rcu_state.gp_seq, oldstate)) {
3829 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3830 		return true;
3831 	}
3832 	return false;
3833 }
3834 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3835 
3836 /**
3837  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3838  *
3839  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3840  *
3841  * If a full RCU grace period has elapsed since the earlier call to
3842  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3843  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3844  *
3845  * Yes, this function does not take counter wrap into account.  But
3846  * counter wrap is harmless.  If the counter wraps, we have waited for
3847  * more than 2 billion grace periods (and way more on a 64-bit system!),
3848  * so waiting for one additional grace period should be just fine.
3849  *
3850  * This function provides the same memory-ordering guarantees that
3851  * would be provided by a synchronize_rcu() that was invoked at the call
3852  * to the function that provided @oldstate, and that returned at the end
3853  * of this function.
3854  */
3855 void cond_synchronize_rcu(unsigned long oldstate)
3856 {
3857 	if (!poll_state_synchronize_rcu(oldstate))
3858 		synchronize_rcu();
3859 }
3860 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3861 
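/*
 * Illustration (hypothetical code, not part of this file): combining
 * the polling grace-period interfaces above.  The do_other_work() and
 * free_deferred_objects() helpers are assumptions made for this sketch.
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu();	// Snapshot, kick off GP.
 *	do_other_work();
 *	if (poll_state_synchronize_rcu(cookie))
 *		free_deferred_objects();	// GP elapsed, no waiting.
 *	else
 *		cond_synchronize_rcu(cookie);	// Block for the remainder.
 */
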
3862 /*
3863  * Check to see if there is any immediate RCU-related work to be done by
3864  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3865  * in order of increasing expense: checks that can be carried out against
3866  * CPU-local state are performed first.  However, we must check for CPU
3867  * stalls first, else we might not get a chance.
3868  */
3869 static int rcu_pending(int user)
3870 {
3871 	bool gp_in_progress;
3872 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3873 	struct rcu_node *rnp = rdp->mynode;
3874 
3875 	lockdep_assert_irqs_disabled();
3876 
3877 	/* Check for CPU stalls, if enabled. */
3878 	check_cpu_stall(rdp);
3879 
3880 	/* Does this CPU need a deferred NOCB wakeup? */
3881 	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3882 		return 1;
3883 
3884 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3885 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3886 		return 0;
3887 
3888 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3889 	gp_in_progress = rcu_gp_in_progress();
3890 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3891 		return 1;
3892 
3893 	/* Does this CPU have callbacks ready to invoke? */
3894 	if (!rcu_rdp_is_offloaded(rdp) &&
3895 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3896 		return 1;
3897 
3898 	/* Has RCU gone idle with this CPU needing another grace period? */
3899 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3900 	    !rcu_rdp_is_offloaded(rdp) &&
3901 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3902 		return 1;
3903 
3904 	/* Has an RCU grace period completed or started?  */
3905 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3906 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3907 		return 1;
3908 
3909 	/* nothing to do */
3910 	return 0;
3911 }
3912 
3913 /*
3914  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3915  * the compiler is expected to optimize this away.
3916  */
3917 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3918 {
3919 	trace_rcu_barrier(rcu_state.name, s, cpu,
3920 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3921 }
3922 
3923 /*
3924  * RCU callback function for rcu_barrier().  If we are last, wake
3925  * up the task executing rcu_barrier().
3926  *
3927  * Note that the value of rcu_state.barrier_sequence must be captured
3928  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3929  * other CPUs might count the value down to zero before this CPU gets
3930  * around to invoking rcu_barrier_trace(), which might result in bogus
3931  * data from the next instance of rcu_barrier().
3932  */
3933 static void rcu_barrier_callback(struct rcu_head *rhp)
3934 {
3935 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3936 
3937 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3938 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3939 		complete(&rcu_state.barrier_completion);
3940 	} else {
3941 		rcu_barrier_trace(TPS("CB"), -1, s);
3942 	}
3943 }
3944 
3945 /*
3946  * Called with preemption disabled, and from cross-cpu IRQ context.
3947  */
3948 static void rcu_barrier_func(void *cpu_in)
3949 {
3950 	uintptr_t cpu = (uintptr_t)cpu_in;
3951 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3952 
3953 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3954 	rdp->barrier_head.func = rcu_barrier_callback;
3955 	debug_rcu_head_queue(&rdp->barrier_head);
3956 	rcu_nocb_lock(rdp);
3957 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3958 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3959 		atomic_inc(&rcu_state.barrier_cpu_count);
3960 	} else {
3961 		debug_rcu_head_unqueue(&rdp->barrier_head);
3962 		rcu_barrier_trace(TPS("IRQNQ"), -1,
3963 				  rcu_state.barrier_sequence);
3964 	}
3965 	rcu_nocb_unlock(rdp);
3966 }
3967 
3968 /**
3969  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3970  *
3971  * Note that this primitive does not necessarily wait for an RCU grace period
3972  * to complete.  For example, if there are no RCU callbacks queued anywhere
3973  * in the system, then rcu_barrier() is within its rights to return
3974  * immediately, without waiting for anything, much less an RCU grace period.
3975  */
3976 void rcu_barrier(void)
3977 {
3978 	uintptr_t cpu;
3979 	struct rcu_data *rdp;
3980 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3981 
3982 	rcu_barrier_trace(TPS("Begin"), -1, s);
3983 
3984 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3985 	mutex_lock(&rcu_state.barrier_mutex);
3986 
3987 	/* Did someone else do our work for us? */
3988 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3989 		rcu_barrier_trace(TPS("EarlyExit"), -1,
3990 				  rcu_state.barrier_sequence);
3991 		smp_mb(); /* caller's subsequent code after above check. */
3992 		mutex_unlock(&rcu_state.barrier_mutex);
3993 		return;
3994 	}
3995 
3996 	/* Mark the start of the barrier operation. */
3997 	rcu_seq_start(&rcu_state.barrier_sequence);
3998 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3999 
4000 	/*
4001 	 * Initialize the count to two rather than to zero in order
4002 	 * to avoid a too-soon return to zero in case of an immediate
4003 	 * invocation of the just-enqueued callback (or preemption of
4004 	 * this task).  Exclude CPU-hotplug operations to ensure that no
4005 	 * offline non-offloaded CPU has callbacks queued.
4006 	 */
4007 	init_completion(&rcu_state.barrier_completion);
4008 	atomic_set(&rcu_state.barrier_cpu_count, 2);
4009 	cpus_read_lock();
4010 
4011 	/*
4012 	 * Force each CPU with callbacks to register a new callback.
4013 	 * When that callback is invoked, we will know that all of the
4014 	 * corresponding CPU's preceding callbacks have been invoked.
4015 	 */
4016 	for_each_possible_cpu(cpu) {
4017 		rdp = per_cpu_ptr(&rcu_data, cpu);
4018 		if (cpu_is_offline(cpu) &&
4019 		    !rcu_rdp_is_offloaded(rdp))
4020 			continue;
4021 		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
4022 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
4023 					  rcu_state.barrier_sequence);
4024 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
4025 		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
4026 			   cpu_is_offline(cpu)) {
4027 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
4028 					  rcu_state.barrier_sequence);
4029 			local_irq_disable();
4030 			rcu_barrier_func((void *)cpu);
4031 			local_irq_enable();
4032 		} else if (cpu_is_offline(cpu)) {
4033 			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
4034 					  rcu_state.barrier_sequence);
4035 		} else {
4036 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
4037 					  rcu_state.barrier_sequence);
4038 		}
4039 	}
4040 	cpus_read_unlock();
4041 
4042 	/*
4043 	 * Now that we have an rcu_barrier_callback() callback on each
4044 	 * CPU, and thus each counted, remove the initial count.
4045 	 */
4046 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4047 		complete(&rcu_state.barrier_completion);
4048 
4049 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4050 	wait_for_completion(&rcu_state.barrier_completion);
4051 
4052 	/* Mark the end of the barrier operation. */
4053 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4054 	rcu_seq_end(&rcu_state.barrier_sequence);
4055 
4056 	/* Other rcu_barrier() invocations can now safely proceed. */
4057 	mutex_unlock(&rcu_state.barrier_mutex);
4058 }
4059 EXPORT_SYMBOL_GPL(rcu_barrier);
4060 
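/*
 * Illustration (hypothetical code, not part of this file): the classic
 * module-unload use of rcu_barrier().  The foo_* names are assumptions
 * made for this sketch.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_foo();	// Stop posting new call_rcu() CBs.
 *		rcu_barrier();		// Wait for in-flight CBs to finish.
 *		kmem_cache_destroy(foo_cache);	// Now safe to tear down.
 *	}
 */
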
4061 /*
4062  * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4063  * first CPU in a given leaf rcu_node structure coming online.  The caller
4064  * must hold the corresponding leaf rcu_node ->lock with interrupts
4065  * disabled.
4066  */
4067 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4068 {
4069 	long mask;
4070 	long oldmask;
4071 	struct rcu_node *rnp = rnp_leaf;
4072 
4073 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4074 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4075 	for (;;) {
4076 		mask = rnp->grpmask;
4077 		rnp = rnp->parent;
4078 		if (rnp == NULL)
4079 			return;
4080 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4081 		oldmask = rnp->qsmaskinit;
4082 		rnp->qsmaskinit |= mask;
4083 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4084 		if (oldmask)
4085 			return;
4086 	}
4087 }
4088 
4089 /*
4090  * Do boot-time initialization of a CPU's per-CPU RCU data.
4091  */
4092 static void __init
4093 rcu_boot_init_percpu_data(int cpu)
4094 {
4095 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4096 
4097 	/* Set up local state, ensuring consistent view of global state. */
4098 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4099 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4100 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
4101 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
4102 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4103 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4104 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4105 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4106 	rdp->cpu = cpu;
4107 	rcu_boot_init_nocb_percpu_data(rdp);
4108 }
4109 
4110 /*
4111  * Invoked early in the CPU-online process, when pretty much all services
4112  * are available.  The incoming CPU is not present.
4113  *
4114  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4115  * offline event can be happening at a given time.  Note also that we can
4116  * accept some slop in the rcu_state.gp_seq access due to the fact that this
4117  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4118  * And any offloaded callbacks are being numbered elsewhere.
4119  */
4120 int rcutree_prepare_cpu(unsigned int cpu)
4121 {
4122 	unsigned long flags;
4123 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4124 	struct rcu_node *rnp = rcu_get_root();
4125 
4126 	/* Set up local state, ensuring consistent view of global state. */
4127 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4128 	rdp->qlen_last_fqs_check = 0;
4129 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4130 	rdp->blimit = blimit;
4131 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4132 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4133 
4134 	/*
4135 	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4136 	 * (re-)initialized.
4137 	 */
4138 	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4139 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4140 
4141 	/*
4142 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4143 	 * propagation up the rcu_node tree will happen at the beginning
4144 	 * of the next grace period.
4145 	 */
4146 	rnp = rdp->mynode;
4147 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4148 	rdp->beenonline = true;	 /* We have now been online. */
4149 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4150 	rdp->gp_seq_needed = rdp->gp_seq;
4151 	rdp->cpu_no_qs.b.norm = true;
4152 	rdp->core_needs_qs = false;
4153 	rdp->rcu_iw_pending = false;
4154 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4155 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4156 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4157 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4158 	rcu_spawn_one_boost_kthread(rnp);
4159 	rcu_spawn_cpu_nocb_kthread(cpu);
4160 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4161 
4162 	return 0;
4163 }
4164 
4165 /*
4166  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4167  */
4168 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4169 {
4170 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4171 
4172 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4173 }
4174 
4175 /*
4176  * Near the end of the CPU-online process.  Pretty much all services
4177  * enabled, and the CPU is now very much alive.
4178  */
4179 int rcutree_online_cpu(unsigned int cpu)
4180 {
4181 	unsigned long flags;
4182 	struct rcu_data *rdp;
4183 	struct rcu_node *rnp;
4184 
4185 	rdp = per_cpu_ptr(&rcu_data, cpu);
4186 	rnp = rdp->mynode;
4187 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4188 	rnp->ffmask |= rdp->grpmask;
4189 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4190 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4191 		return 0; /* Too early in boot for scheduler work. */
4192 	sync_sched_exp_online_cleanup(cpu);
4193 	rcutree_affinity_setting(cpu, -1);
4194 
4195 	// Stop-machine done, so allow nohz_full to disable tick.
4196 	tick_dep_clear(TICK_DEP_BIT_RCU);
4197 	return 0;
4198 }
4199 
4200 /*
4201  * Near the beginning of the process.  The CPU is still very much alive
4202  * with pretty much all services enabled.
4203  */
4204 int rcutree_offline_cpu(unsigned int cpu)
4205 {
4206 	unsigned long flags;
4207 	struct rcu_data *rdp;
4208 	struct rcu_node *rnp;
4209 
4210 	rdp = per_cpu_ptr(&rcu_data, cpu);
4211 	rnp = rdp->mynode;
4212 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4213 	rnp->ffmask &= ~rdp->grpmask;
4214 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4215 
4216 	rcutree_affinity_setting(cpu, cpu);
4217 
4218 	// nohz_full CPUs need the tick for stop-machine to work quickly
4219 	tick_dep_set(TICK_DEP_BIT_RCU);
4220 	return 0;
4221 }
4222 
4223 /*
4224  * Mark the specified CPU as being online so that subsequent grace periods
4225  * (both expedited and normal) will wait on it.  Note that this means that
4226  * incoming CPUs are not allowed to use RCU read-side critical sections
4227  * until this function is called.  Failing to observe this restriction
4228  * will result in lockdep splats.
4229  *
4230  * Note that this function is special in that it is invoked directly
4231  * from the incoming CPU rather than from the cpuhp_step mechanism.
4232  * This is because this function must be invoked at a precise location.
4233  */
4234 void rcu_cpu_starting(unsigned int cpu)
4235 {
4236 	unsigned long flags;
4237 	unsigned long mask;
4238 	struct rcu_data *rdp;
4239 	struct rcu_node *rnp;
4240 	bool newcpu;
4241 
4242 	rdp = per_cpu_ptr(&rcu_data, cpu);
4243 	if (rdp->cpu_started)
4244 		return;
4245 	rdp->cpu_started = true;
4246 
4247 	rnp = rdp->mynode;
4248 	mask = rdp->grpmask;
4249 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4250 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4251 	rcu_dynticks_eqs_online();
4252 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4253 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4254 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4255 	newcpu = !(rnp->expmaskinitnext & mask);
4256 	rnp->expmaskinitnext |= mask;
4257 	/* Allow lockless access for expedited grace periods. */
4258 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4259 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4260 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4261 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4262 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4263 
4264 	/* An incoming CPU should never be blocking a grace period. */
4265 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4266 		rcu_disable_urgency_upon_qs(rdp);
4267 		/* Report QS -after- changing ->qsmaskinitnext! */
4268 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4269 	} else {
4270 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4271 	}
4272 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4273 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4274 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4275 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4276 }
4277 
4278 /*
4279  * The outgoing CPU has no further need of RCU, so remove it from
4280  * the rcu_node tree's ->qsmaskinitnext bit masks.
4281  *
4282  * Note that this function is special in that it is invoked directly
4283  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4284  * This is because this function must be invoked at a precise location.
4285  */
4286 void rcu_report_dead(unsigned int cpu)
4287 {
4288 	unsigned long flags;
4289 	unsigned long mask;
4290 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4291 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4292 
4293 	// Do any dangling deferred wakeups.
4294 	do_nocb_deferred_wakeup(rdp);
4295 
4296 	/* QS for any half-done expedited grace period. */
4297 	rcu_report_exp_rdp(rdp);
4298 	rcu_preempt_deferred_qs(current);
4299 
4300 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4301 	mask = rdp->grpmask;
4302 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4303 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4304 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4305 	raw_spin_lock(&rcu_state.ofl_lock);
4306 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4307 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4308 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4309 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4310 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4311 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4312 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4313 	}
4314 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4315 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4316 	raw_spin_unlock(&rcu_state.ofl_lock);
4317 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4318 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4319 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4320 
4321 	rdp->cpu_started = false;
4322 }
4323 
4324 #ifdef CONFIG_HOTPLUG_CPU
4325 /*
4326  * The outgoing CPU has just passed through the dying-idle state, and we
4327  * are being invoked from the CPU that was IPIed to continue the offline
4328  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4329  */
4330 void rcutree_migrate_callbacks(int cpu)
4331 {
4332 	unsigned long flags;
4333 	struct rcu_data *my_rdp;
4334 	struct rcu_node *my_rnp;
4335 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4336 	bool needwake;
4337 
4338 	if (rcu_rdp_is_offloaded(rdp) ||
4339 	    rcu_segcblist_empty(&rdp->cblist))
4340 		return;  /* No callbacks to migrate. */
4341 
4342 	local_irq_save(flags);
4343 	my_rdp = this_cpu_ptr(&rcu_data);
4344 	my_rnp = my_rdp->mynode;
4345 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4346 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4347 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4348 	/* Leverage recent GPs and set GP for new callbacks. */
4349 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4350 		   rcu_advance_cbs(my_rnp, my_rdp);
4351 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4352 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4353 	rcu_segcblist_disable(&rdp->cblist);
4354 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4355 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4356 	if (rcu_rdp_is_offloaded(my_rdp)) {
4357 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4358 		__call_rcu_nocb_wake(my_rdp, true, flags);
4359 	} else {
4360 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4361 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4362 	}
4363 	if (needwake)
4364 		rcu_gp_kthread_wake();
4365 	lockdep_assert_irqs_enabled();
4366 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4367 		  !rcu_segcblist_empty(&rdp->cblist),
4368 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4369 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4370 		  rcu_segcblist_first_cb(&rdp->cblist));
4371 }
4372 #endif
4373 
4374 /*
4375  * On non-huge systems, use expedited RCU grace periods to make suspend
4376  * and hibernation run faster.
4377  */
4378 static int rcu_pm_notify(struct notifier_block *self,
4379 			 unsigned long action, void *hcpu)
4380 {
4381 	switch (action) {
4382 	case PM_HIBERNATION_PREPARE:
4383 	case PM_SUSPEND_PREPARE:
4384 		rcu_expedite_gp();
4385 		break;
4386 	case PM_POST_HIBERNATION:
4387 	case PM_POST_SUSPEND:
4388 		rcu_unexpedite_gp();
4389 		break;
4390 	default:
4391 		break;
4392 	}
4393 	return NOTIFY_OK;
4394 }
4395 
4396 /*
4397  * Spawn the kthreads that handle RCU's grace periods.
4398  */
4399 static int __init rcu_spawn_gp_kthread(void)
4400 {
4401 	unsigned long flags;
4402 	int kthread_prio_in = kthread_prio;
4403 	struct rcu_node *rnp;
4404 	struct sched_param sp;
4405 	struct task_struct *t;
4406 
4407 	/* Force priority into range. */
4408 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4409 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4410 		kthread_prio = 2;
4411 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4412 		kthread_prio = 1;
4413 	else if (kthread_prio < 0)
4414 		kthread_prio = 0;
4415 	else if (kthread_prio > 99)
4416 		kthread_prio = 99;
4417 
4418 	if (kthread_prio != kthread_prio_in)
4419 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4420 			 kthread_prio, kthread_prio_in);
4421 
4422 	rcu_scheduler_fully_active = 1;
4423 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4424 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4425 		return 0;
4426 	if (kthread_prio) {
4427 		sp.sched_priority = kthread_prio;
4428 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4429 	}
4430 	rnp = rcu_get_root();
4431 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4432 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4433 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4434 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4435 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4436 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4437 	wake_up_process(t);
4438 	rcu_spawn_nocb_kthreads();
4439 	rcu_spawn_boost_kthreads();
4440 	rcu_spawn_core_kthreads();
4441 	return 0;
4442 }
4443 early_initcall(rcu_spawn_gp_kthread);
4444 
4445 /*
4446  * This function is invoked towards the end of the scheduler's
4447  * initialization process.  Before this is called, the idle task might
4448  * contain synchronous grace-period primitives (during which time, this idle
4449  * task is booting the system, and such primitives are no-ops).  After this
4450  * function is called, any synchronous grace-period primitives are run as
4451  * expedited, with the requesting task driving the grace period forward.
4452  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4453  * runtime RCU functionality.
4454  */
4455 void rcu_scheduler_starting(void)
4456 {
4457 	WARN_ON(num_online_cpus() != 1);
4458 	WARN_ON(nr_context_switches() > 0);
4459 	rcu_test_sync_prims();
4460 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4461 	rcu_test_sync_prims();
4462 }
4463 
4464 /*
4465  * Helper function for rcu_init() that initializes the rcu_state structure.
4466  */
4467 static void __init rcu_init_one(void)
4468 {
4469 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4470 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4471 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4472 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4473 
4474 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4475 	int cpustride = 1;
4476 	int i;
4477 	int j;
4478 	struct rcu_node *rnp;
4479 
4480 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4481 
4482 	/* Silence gcc 4.8 false positive about array index out of range. */
4483 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4484 		panic("rcu_init_one: rcu_num_lvls out of range");
4485 
4486 	/* Initialize the level-tracking arrays. */
4487 
4488 	for (i = 1; i < rcu_num_lvls; i++)
4489 		rcu_state.level[i] =
4490 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4491 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4492 
4493 	/* Initialize the elements themselves, starting from the leaves. */
4494 
4495 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4496 		cpustride *= levelspread[i];
4497 		rnp = rcu_state.level[i];
4498 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4499 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4500 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4501 						   &rcu_node_class[i], buf[i]);
4502 			raw_spin_lock_init(&rnp->fqslock);
4503 			lockdep_set_class_and_name(&rnp->fqslock,
4504 						   &rcu_fqs_class[i], fqs[i]);
4505 			rnp->gp_seq = rcu_state.gp_seq;
4506 			rnp->gp_seq_needed = rcu_state.gp_seq;
4507 			rnp->completedqs = rcu_state.gp_seq;
4508 			rnp->qsmask = 0;
4509 			rnp->qsmaskinit = 0;
4510 			rnp->grplo = j * cpustride;
4511 			rnp->grphi = (j + 1) * cpustride - 1;
4512 			if (rnp->grphi >= nr_cpu_ids)
4513 				rnp->grphi = nr_cpu_ids - 1;
4514 			if (i == 0) {
4515 				rnp->grpnum = 0;
4516 				rnp->grpmask = 0;
4517 				rnp->parent = NULL;
4518 			} else {
4519 				rnp->grpnum = j % levelspread[i - 1];
4520 				rnp->grpmask = BIT(rnp->grpnum);
4521 				rnp->parent = rcu_state.level[i - 1] +
4522 					      j / levelspread[i - 1];
4523 			}
4524 			rnp->level = i;
4525 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4526 			rcu_init_one_nocb(rnp);
4527 			init_waitqueue_head(&rnp->exp_wq[0]);
4528 			init_waitqueue_head(&rnp->exp_wq[1]);
4529 			init_waitqueue_head(&rnp->exp_wq[2]);
4530 			init_waitqueue_head(&rnp->exp_wq[3]);
4531 			spin_lock_init(&rnp->exp_lock);
4532 		}
4533 	}
4534 
4535 	init_swait_queue_head(&rcu_state.gp_wq);
4536 	init_swait_queue_head(&rcu_state.expedited_wq);
4537 	rnp = rcu_first_leaf_node();
4538 	for_each_possible_cpu(i) {
4539 		while (i > rnp->grphi)
4540 			rnp++;
4541 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4542 		rcu_boot_init_percpu_data(i);
4543 	}
4544 }
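
/*
 * For example, with rcu_fanout_leaf == 16 the loop above points CPUs
 * 0-15 at the first leaf rcu_node, CPUs 16-31 at the second, and so
 * on, through each CPU's rcu_data ->mynode pointer.
 */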
4545 
4546 /*
4547  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4548  * replace the definitions in tree.h because those are needed to size
4549  * the ->node array in the rcu_state structure.
4550  */
4551 void rcu_init_geometry(void)
4552 {
4553 	ulong d;
4554 	int i;
4555 	static unsigned long old_nr_cpu_ids;
4556 	int rcu_capacity[RCU_NUM_LVLS];
4557 	static bool initialized;
4558 
4559 	if (initialized) {
4560 		/*
4561 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4562 		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4563 		 */
4564 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4565 		return;
4566 	}
4567 
4568 	old_nr_cpu_ids = nr_cpu_ids;
4569 	initialized = true;
4570 
4571 	/*
4572 	 * Initialize any unspecified boot parameters.
4573 	 * The default values of jiffies_till_first_fqs and
4574 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4575 	 * value, which is a function of HZ, plus one jiffy for each
4576 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4577 	 */
4578 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4579 	if (jiffies_till_first_fqs == ULONG_MAX)
4580 		jiffies_till_first_fqs = d;
4581 	if (jiffies_till_next_fqs == ULONG_MAX)
4582 		jiffies_till_next_fqs = d;
4583 	adjust_jiffies_till_sched_qs();
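	/*
	 * Worked example (a sketch; assumes the tree.h definitions
	 * RCU_JIFFIES_TILL_FORCE_QS == 1 + (HZ > 250) + (HZ > 500)
	 * and RCU_JIFFIES_FQS_DIV == 256):  with HZ=1000 and
	 * nr_cpu_ids=1024, d = 3 + 1024/256 = 7 jiffies between
	 * force-quiescent-state scans.
	 */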
4584 
4585 	/* If the compile-time values are accurate, just leave. */
4586 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4587 	    nr_cpu_ids == NR_CPUS)
4588 		return;
4589 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4590 		rcu_fanout_leaf, nr_cpu_ids);
4591 
4592 	/*
4593 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4594 	 * and cannot exceed the number of bits in the rcu_node masks.
4595 	 * Complain and fall back to the compile-time values if either
4596 	 * limit is violated.
4597 	 */
4598 	if (rcu_fanout_leaf < 2 ||
4599 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4600 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4601 		WARN_ON(1);
4602 		return;
4603 	}
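
	/*
	 * For example, on a typical 64-bit build sizeof(unsigned long) * 8
	 * is 64, so booting with rcutree.rcu_fanout_leaf=65 would trip the
	 * WARN_ON() above and revert to the compile-time RCU_FANOUT_LEAF.
	 */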
4604 
4605 	/*
4606 	 * Compute the number of CPUs that can be handled by an rcu_node
4607 	 * tree with the given number of levels.
4608 	 */
4609 	rcu_capacity[0] = rcu_fanout_leaf;
4610 	for (i = 1; i < RCU_NUM_LVLS; i++)
4611 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4612 
4613 	/*
4614 	 * The tree must be able to accommodate the configured number of CPUs.
4615 	 * If this limit is exceeded, fall back to the compile-time values.
4616 	 */
4617 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4618 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4619 		WARN_ON(1);
4620 		return;
4621 	}
4622 
4623 	/* Calculate the number of levels in the tree. */
4624 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4625 	}
4626 	rcu_num_lvls = i + 1;
4627 
4628 	/* Calculate the number of rcu_nodes at each level of the tree. */
4629 	for (i = 0; i < rcu_num_lvls; i++) {
4630 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4631 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4632 	}
4633 
4634 	/* Calculate the total number of rcu_node structures. */
4635 	rcu_num_nodes = 0;
4636 	for (i = 0; i < rcu_num_lvls; i++)
4637 		rcu_num_nodes += num_rcu_lvl[i];
4638 }
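
/*
 * Worked example (a sketch; assumes the common 64-bit defaults
 * RCU_FANOUT == 64 and rcu_fanout_leaf == 16) for nr_cpu_ids == 1024:
 *
 *	rcu_capacity[] = { 16, 1024, 65536, ... };
 *	nr_cpu_ids <= rcu_capacity[1], so rcu_num_lvls = 2;
 *	num_rcu_lvl[0] = DIV_ROUND_UP(1024, 1024) = 1  (the root);
 *	num_rcu_lvl[1] = DIV_ROUND_UP(1024, 16)   = 64 (the leaves);
 *	rcu_num_nodes  = 65.
 */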
4639 
4640 /*
4641  * Dump out the structure of the rcu_node combining tree associated
4642  * with the rcu_state structure.
4643  */
4644 static void __init rcu_dump_rcu_node_tree(void)
4645 {
4646 	int level = 0;
4647 	struct rcu_node *rnp;
4648 
4649 	pr_info("rcu_node tree layout dump\n");
4650 	pr_info(" ");
4651 	rcu_for_each_node_breadth_first(rnp) {
4652 		if (rnp->level != level) {
4653 			pr_cont("\n");
4654 			pr_info(" ");
4655 			level = rnp->level;
4656 		}
4657 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4658 	}
4659 	pr_cont("\n");
4660 }
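
/*
 * For the two-level 1024-CPU sketch above, the dump would look roughly
 * like this ("rcu: " is the pr_fmt prefix; each entry is
 * grplo:grphi ^grpnum):
 *
 *	rcu: rcu_node tree layout dump
 *	rcu:  0:1023 ^0
 *	rcu:  0:15 ^0 16:31 ^1 32:47 ^2 ... 1008:1023 ^63
 */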
4661 
4662 struct workqueue_struct *rcu_gp_wq;
4663 struct workqueue_struct *rcu_par_gp_wq;
4664 
4665 static void __init kfree_rcu_batch_init(void)
4666 {
4667 	int cpu;
4668 	int i;
4669 
4670 	/* Clamp the delay to the interval [0, 100] seconds. */
4671 	if (rcu_delay_page_cache_fill_msec < 0 ||
4672 		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4673 
4674 		rcu_delay_page_cache_fill_msec =
4675 			clamp(rcu_delay_page_cache_fill_msec, 0,
4676 				(int) (100 * MSEC_PER_SEC));
4677 
4678 		pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4679 			rcu_delay_page_cache_fill_msec);
4680 	}
4681 
4682 	for_each_possible_cpu(cpu) {
4683 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4684 
4685 		for (i = 0; i < KFREE_N_BATCHES; i++) {
4686 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4687 			krcp->krw_arr[i].krcp = krcp;
4688 		}
4689 
4690 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4691 		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4692 		krcp->initialized = true;
4693 	}
4694 	if (register_shrinker(&kfree_rcu_shrinker))
4695 		pr_err("Failed to register kfree_rcu() shrinker!\n");
4696 }
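
/*
 * The per-CPU batching state initialized above services kfree_rcu(),
 * which queues an object for freeing after a grace period, coalesced
 * per CPU by this machinery.  A minimal usage sketch (struct foo and
 * foo_release() are hypothetical; kfree_rcu() is the real API from
 * include/linux/rcupdate.h):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rh);
 *	}
 */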
4697 
4698 void __init rcu_init(void)
4699 {
4700 	int cpu;
4701 
4702 	rcu_early_boot_tests();
4703 
4704 	kfree_rcu_batch_init();
4705 	rcu_bootup_announce();
4706 	rcu_init_geometry();
4707 	rcu_init_one();
4708 	if (dump_tree)
4709 		rcu_dump_rcu_node_tree();
4710 	if (use_softirq)
4711 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4712 
4713 	/*
4714 	 * We don't need protection against CPU-hotplug here because
4715 	 * this is called early in boot, before interrupts are enabled
4716 	 * and before the scheduler is operational.
4717 	 */
4718 	pm_notifier(rcu_pm_notify, 0);
4719 	for_each_online_cpu(cpu) {
4720 		rcutree_prepare_cpu(cpu);
4721 		rcu_cpu_starting(cpu);
4722 		rcutree_online_cpu(cpu);
4723 	}
4724 
4725 	/* Create workqueues for Tree SRCU and for expedited GPs. */
4726 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4727 	WARN_ON(!rcu_gp_wq);
4728 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4729 	WARN_ON(!rcu_par_gp_wq);
4730 
4731 	/* Fill in default value for rcutree.qovld boot parameter. */
4732 	/* -After- the rcu_node ->lock fields are initialized! */
4733 	if (qovld < 0)
4734 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4735 	else
4736 		qovld_calc = qovld;
4737 }
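
/*
 * For instance, assuming the defaults defined earlier in this file
 * (DEFAULT_RCU_QHIMARK == 10000 and DEFAULT_RCU_QOVLD_MULT == 2),
 * leaving the rcutree.qovld boot parameter unset yields
 * qovld_calc == 20000 callbacks.
 */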
4738 
4739 #include "tree_stall.h"
4740 #include "tree_exp.h"
4741 #include "tree_nocb.h"
4742 #include "tree_plugin.h"
4743