/* xref: /openbmc/linux/kernel/rcu/tree.c (revision 6a143a7c) */
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
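
/*
 * To make the bit stealing concrete (values follow directly from the
 * two defines above): RCU_DYNTICK_CTRL_CTR == 0x2, so ->dynticks only
 * ever moves in steps of 0x2 and bit 0 stays free for the special
 * action flag.  A sample sequence:
 *
 *	->dynticks == 0x2	// CTR bit set: RCU is watching
 *	eqs enter: += 0x2 -> 0x4	// CTR bit clear: extended QS
 *	eqs exit:  += 0x2 -> 0x6	// CTR bit set: watching again
 *
 * At every point, (->dynticks & RCU_DYNTICK_CTRL_CTR) distinguishes
 * "watching" from "in an extended quiescent state", while bit 0 can be
 * set independently by rcu_eqs_special_set() below.
 */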

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
#ifdef CONFIG_RCU_NOCB_CPU
	.cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
#endif
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};
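
/*
 * For illustration: ->gp_seq above starts 300 grace-period increments
 * short of unsigned-long wraparound.  Assuming RCU_SEQ_CTR_SHIFT == 2
 * (its value in kernel/rcu/rcu.h; the low two bits hold the phase),
 * this is 0xfffffffffffffb50 on a 64-bit system, so the counter wraps
 * after roughly 300 grace periods of uptime, exercising the
 * wrap-tolerant ULONG_CMP_LT()/rcu_seq_*() comparisons early in every
 * boot rather than only after years of uptime.
 */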

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
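
/*
 * A worked example of the geometry these variables describe, assuming
 * the usual 64-bit defaults of RCU_FANOUT_LEAF == 16 and RCU_FANOUT ==
 * 64: a 256-CPU system gets 256 / 16 = 16 leaf rcu_node structures,
 * all of which fit under a single root, so rcu_num_lvls == 2 and
 * rcu_num_nodes == 17 (one root plus 16 leaves).
 */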

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This RCU parameter is runtime-read-only.  It reflects the minimum
 * number of objects that can be cached per CPU.  Each object is one
 * page in size.  The value can be changed at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
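
/*
 * To make the normalization concrete: if the debug delay is d jiffies,
 * the grace-period slowdown helper (rcu_gp_slow(), later in this file)
 * sleeps for d jiffies once every rcu_num_nodes * PER_RCU_NODE_PERIOD *
 * d grace periods.  The added latency per grace period is therefore
 * d / (rcu_num_nodes * PER_RCU_NODE_PERIOD * d), that is,
 * 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD) jiffies, independent of d,
 * which is the constant overall slowdown described above.
 */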

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
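
/*
 * A minimal sketch of the ->gp_seq encoding this test relies on,
 * assuming the definitions in kernel/rcu/rcu.h (RCU_SEQ_CTR_SHIFT == 2,
 * RCU_SEQ_STATE_MASK == 3): the low two bits hold the phase, the rest
 * counts grace periods.
 *
 *	gp_seq == 0x14: counter 5, state 0 -> no grace period in progress
 *	gp_seq == 0x15: counter 5, state 1 -> grace period in progress
 *
 * rcu_seq_state() extracts the low bits, so rcu_gp_in_progress() is
 * simply "is the phase nonzero?".
 */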

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	int snap = atomic_add_return(0, &rdp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
bool rcu_is_idle_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}
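
/*
 * Worked example, using the CTR == 0x2 stepping from above: suppose the
 * snapshot was 0x6 (CTR bit set, so the CPU was not idle when sampled).
 * ->dynticks only ever changes by 0x2, and only on entry to or exit
 * from an extended quiescent state (or its emulation by
 * rcu_momentary_dyntick_idle()), so a later re-read of 0x6 proves
 * nothing, while any other value (0x8, 0xa, ...) proves that the CPU
 * passed through at least one extended quiescent state in the interim.
 */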

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
					       RCU_DYNTICK_CTRL_CTR);

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure.
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	int new_old;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	new_old = atomic_read(&rdp->dynticks);
	do {
		old = new_old;
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
	} while (new_old != old);
	return true;
}
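
/*
 * A hypothetical caller sketch (not from this file): a deferred
 * TLB-flush scheme could try to mark an idle CPU and fall back to an
 * IPI when the CPU turns out to be busy:
 *
 *	if (!rcu_eqs_special_set(cpu))
 *		smp_call_function_single(cpu, do_flush_fn, NULL, 1);
 *
 * smp_call_function_single() is the stock kernel API; do_flush_fn is a
 * made-up callback used purely for illustration.
 */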

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
	int special;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
				    &this_cpu_ptr(&rcu_data)->dynticks);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
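
/*
 * The arithmetic behind the "zero-duration dyntick-idle period" above,
 * using CTR == 0x2: adding 2 * RCU_DYNTICK_CTRL_CTR (0x4) leaves the
 * CTR bit unchanged (for example 0x6 -> 0xa, still "watching"), but any
 * snapshot taken before the call no longer matches afterwards, so
 * rcu_dynticks_in_eqs_since() reports a quiescent state just as if the
 * CPU had dipped into idle and back out.
 */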

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods.  The latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}
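
/*
 * Example of the counters this function examines: a CPU in the idle
 * loop has dynticks_nesting == 0.  A first-level interrupt from idle
 * bumps dynticks_nmi_nesting to 1 (see rcu_nmi_enter() below), so the
 * tests above yield true; a nested interrupt raises it to 3, and the
 * "nesting > 1" test then correctly reports false.
 */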

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
				// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);
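
/*
 * Because MODULE_PARAM_PREFIX is "rcutree." (see the top of this file),
 * these knobs appear on the kernel command line with that prefix, for
 * example:
 *
 *	rcutree.blimit=1000 rcutree.qhimark=5000 rcutree.qovld=30000
 *
 * The 0444 permissions make them read-only at runtime via
 * /sys/module/rcutree/parameters/.
 */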

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large systems. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
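
/*
 * A worked instance of the calculation above, assuming HZ == 250,
 * nr_cpu_ids == 64, RCU_JIFFIES_FQS_DIV == 256 (its value in
 * kernel/rcu/rcu.h), and FQS timeouts of one jiffy each:
 * j = 1 + 2 * 1 = 3, which is below the floor of
 * 250 / 10 + 64 / 256 = 25 + 0 = 25, so jiffies_to_sched_qs becomes 25.
 */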

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rdp = this_cpu_ptr(&rcu_data);
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}
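
/*
 * Nesting walk-through for rcu_eqs_enter(): a task running in the
 * kernel has dynticks_nesting == 1 and dynticks_nmi_nesting ==
 * DYNTICK_IRQ_NONIDLE (the per-CPU initial values above).  The
 * outermost call takes the slow path: it crowbars dynticks_nmi_nesting
 * to 0, writes dynticks_nesting to 0, and flips ->dynticks via
 * rcu_dynticks_eqs_enter().  Any nested call (dynticks_nesting > 1,
 * possible with usermode upcalls) merely decrements the counter and
 * returns with RCU still watching.
 */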

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_NO_HZ_FULL

#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
/*
 * An empty function that will trigger a reschedule on the IRQ tail once
 * IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wakeups aren't supported in the resched loops, and
 * our last resort is to fire a local irq_work that will trigger a reschedule once
 * IRQs get re-enabled again.
 */
noinstr static void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}

#else
static inline void rcu_irq_work_resched(void) { }
#endif

/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Other than the generic entry implementation, we may be past the last
	 * rescheduling opportunity in the entry code. Trigger a self IPI
	 * that will fire and reschedule once we resume in user/guest mode.
	 */
	rcu_irq_work_resched();
	rcu_eqs_enter(true);
}

#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (!in_nmi())
		rcu_prepare_for_idle();

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}
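
/*
 * Example of the -2 stepping above: an NMI arriving in task context
 * finds dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE and raises it by 2
 * (see rcu_nmi_enter() below); the matching rcu_nmi_exit() lowers it by
 * 2, never reaching the "== 1" case, so RCU keeps watching.  Only the
 * outermost handler that interrupted a truly idle CPU (nesting exactly
 * 1) walks the full exit path back into an extended quiescent state.
 */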

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}

/**
 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
 *			  towards in-kernel preemption
 *
 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
 * from an RCU point of view. Invoked from return from interrupt before
 * kernel preemption.
 */
void rcu_irq_exit_preempt(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section at any time.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(true);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (!in_nmi()) {
			instrumentation_begin();
			rcu_cleanup_after_idle();
			instrumentation_end();
		}

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
		instrumentation_end();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}
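
/*
 * Putting the incby logic above into numbers: an NMI that interrupts an
 * idle CPU exits the extended quiescent state and moves
 * dynticks_nmi_nesting from 0 to 1 (incby == 1); a second NMI nested
 * inside the first moves it from 1 to 3 (incby == 2).  The value 1 thus
 * uniquely identifies the outermost handler that interrupted an
 * RCU-idle period, which is exactly the case rcu_nmi_exit() keys off.
 */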

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
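
/*
 * The ULONG_MAX / 4 guard above in miniature: pretend gp_seq were only
 * 16 bits wide, making ULONG_MAX / 4 == 0x3fff.  If a CPU slept from
 * rdp->gp_seq == 0x0100 while rnp->gp_seq advanced past 0x0100 + 0x3fff,
 * ULONG_CMP_LT() flags the gap and ->gpwrap is set, telling later code
 * to distrust the stale rcu_data counters rather than misinterpret them
 * once the counter wraps.
 */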

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state.  After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline.  This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
		bool onl;
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(*ruqp, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*ruqp, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}
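
/*
 * Timeline of the escalation implemented above, for a CPU that never
 * reports a quiescent state (with jtsq == jiffies_to_sched_qs):
 *
 *	gp_start + jtsq:	.rcu_urgent_qs set
 *	gp_start + 2 * jtsq:	.rcu_need_heavy_qs also set
 *	jiffies_resched on:	resched_cpu() at most once per jtsq,
 *				plus one rcu_iw irq_work per grace period
 *	nohz_full CPUs:		resched_cpu() hammer after 3 * jtsq
 *
 * Each step is cheap but weak first, expensive but reliable later.
 */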

/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP kthread needs to be awakened, else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}
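
/*
 * Shape of the funnel locking above, on a two-level tree: the caller
 * holds leaf->lock and finds leaf->gp_seq_needed stale, so it records
 * the request, acquires root->lock (keeping leaf->lock held), repeats
 * the check and recording at the root, and only then sets
 * RCU_GP_FLAG_INIT.  Concurrent requesters for the same gp_seq_req bail
 * out at the first node that already records their grace period, so
 * contention on the root lock scales with the number of distinct
 * requests rather than with the number of CPUs.
 */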
1485 
1486 /*
1487  * Clean up any old requests for the just-ended grace period.  Also return
1488  * whether any additional grace periods have been requested.
1489  */
1490 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1491 {
1492 	bool needmore;
1493 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1494 
1495 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1496 	if (!needmore)
1497 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1498 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1499 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1500 	return needmore;
1501 }
1502 
1503 /*
1504  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1505  * interrupt or softirq handler, in which case we just might immediately
1506  * sleep upon return, resulting in a grace-period hang), and don't bother
1507  * awakening when there is nothing for the grace-period kthread to do
1508  * (as in several CPUs raced to awaken, we lost), and finally don't try
1509  * to awaken a kthread that has not yet been created.  If all those checks
1510  * are passed, track some debug information and awaken.
1511  *
1512  * So why do the self-wakeup when in an interrupt or softirq handler
1513  * in the grace-period kthread's context?  Because the kthread might have
1514  * been interrupted just as it was going to sleep, and just after the final
1515  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1516  * is required, and is therefore supplied.
1517  */
1518 static void rcu_gp_kthread_wake(void)
1519 {
1520 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1521 
1522 	if ((current == t && !in_irq() && !in_serving_softirq()) ||
1523 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1524 		return;
1525 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1526 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1527 	swake_up_one(&rcu_state.gp_wq);
1528 }
1529 
1530 /*
1531  * If there is room, assign a ->gp_seq number to any callbacks on this
1532  * CPU that have not already been assigned.  Also accelerate any callbacks
1533  * that were previously assigned a ->gp_seq number that has since proven
1534  * to be too conservative, which can happen if callbacks get assigned a
1535  * ->gp_seq number while RCU is idle, but with reference to a non-root
1536  * rcu_node structure.  This function is idempotent, so it does not hurt
1537  * to call it repeatedly.  Returns an flag saying that we should awaken
1538  * the RCU grace-period kthread.
1539  *
1540  * The caller must hold rnp->lock with interrupts disabled.
1541  */
1542 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1543 {
1544 	unsigned long gp_seq_req;
1545 	bool ret = false;
1546 
1547 	rcu_lockdep_assert_cblist_protected(rdp);
1548 	raw_lockdep_assert_held_rcu_node(rnp);
1549 
1550 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1551 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1552 		return false;
1553 
1554 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1555 
1556 	/*
1557 	 * Callbacks are often registered with incomplete grace-period
1558 	 * information.  Something about the fact that getting exact
1559 	 * information requires acquiring a global lock...  RCU therefore
1560 	 * makes a conservative estimate of the grace period number at which
1561  * a given callback will become ready to invoke.  The following
1562 	 * code checks this estimate and improves it when possible, thus
1563 	 * accelerating callback invocation to an earlier grace-period
1564 	 * number.
1565 	 */
1566 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1567 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1568 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1569 
1570 	/* Trace depending on how much we were able to accelerate. */
1571 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1572 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1573 	else
1574 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1575 
1576 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1577 
1578 	return ret;
1579 }
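
/*
 * Editor's sketch (not part of the kernel source): the arithmetic
 * behind the rcu_seq_snap() call above, as a user-space model.  The
 * bottom two bits of a sequence number hold the phase (0 = idle,
 * nonzero = grace period in progress) and the upper bits count grace
 * periods; the snapshot is the smallest sequence value at which a full
 * grace period is guaranteed to have elapsed.  The "model_" names are
 * hypothetical and the real function's memory barrier is omitted.
 */
#define MODEL_SEQ_STATE_MASK	0x3UL

static unsigned long model_seq_snap(unsigned long s)
{
	/*
	 * Idle: round up by one full counter tick (one complete GP).
	 * In progress: the current GP may predate our request, so the
	 * rounding lands one further tick out, covering it and the
	 * next.  Both cases collapse into this single expression.
	 */
	return (s + 2 * MODEL_SEQ_STATE_MASK + 1) & ~MODEL_SEQ_STATE_MASK;
}

/* Example: idle at 8 (GP #2 done) snaps to 12; mid-GP at 9 snaps to 16. */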
1580 
1581 /*
1582  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1583  * rcu_node structure's ->lock be held.  It consults the cached value
1584  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1585  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1586  * while holding the leaf rcu_node structure's ->lock.
1587  */
1588 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1589 					struct rcu_data *rdp)
1590 {
1591 	unsigned long c;
1592 	bool needwake;
1593 
1594 	rcu_lockdep_assert_cblist_protected(rdp);
1595 	c = rcu_seq_snap(&rcu_state.gp_seq);
1596 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1597 		/* Old request still live, so mark recent callbacks. */
1598 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1599 		return;
1600 	}
1601 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1602 	needwake = rcu_accelerate_cbs(rnp, rdp);
1603 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1604 	if (needwake)
1605 		rcu_gp_kthread_wake();
1606 }
1607 
1608 /*
1609  * Move any callbacks whose grace period has completed to the
1610  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1611  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1612  * sublist.  This function is idempotent, so it does not hurt to
1613  * invoke it repeatedly.  As long as it is not invoked -too- often...
1614  * Returns true if the RCU grace-period kthread needs to be awakened.
1615  *
1616  * The caller must hold rnp->lock with interrupts disabled.
1617  */
1618 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1619 {
1620 	rcu_lockdep_assert_cblist_protected(rdp);
1621 	raw_lockdep_assert_held_rcu_node(rnp);
1622 
1623 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1624 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1625 		return false;
1626 
1627 	/*
1628 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1629 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1630 	 */
1631 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1632 
1633 	/* Classify any remaining callbacks. */
1634 	return rcu_accelerate_cbs(rnp, rdp);
1635 }
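
/*
 * Editor's sketch (not part of the kernel source): the segmented
 * callback list that rcu_advance_cbs() manipulates, as a minimal
 * single-threaded model.  One singly linked list is carved into DONE,
 * WAIT, NEXT_READY, and NEXT segments by "tail" pointers; WAIT and
 * NEXT_READY each carry the ->gp_seq value at which their callbacks
 * become safe to run.  Advancing merges every newly safe segment into
 * DONE.  The real rcu_segcblist_advance() also re-labels the remaining
 * segments, which is omitted here; all "model_" names are hypothetical.
 */
struct model_cb {
	struct model_cb *next;
};

enum { MODEL_DONE, MODEL_WAIT, MODEL_NEXT_READY, MODEL_NEXT, MODEL_NSEG };

struct model_cblist {
	struct model_cb *head;
	struct model_cb **tails[MODEL_NSEG];	/* &last->next per segment. */
	unsigned long seg_seq[MODEL_NSEG];	/* Safe-after tag per segment. */
};

static void model_advance(struct model_cblist *cl, unsigned long cur_seq)
{
	int i;

	/* Absorb every segment whose tag has been reached into DONE. */
	for (i = MODEL_WAIT; i < MODEL_NEXT; i++) {
		if ((long)(cl->seg_seq[i] - cur_seq) > 0)
			break;			/* This one must keep waiting. */
		cl->tails[MODEL_DONE] = cl->tails[i];
	}
	/* The absorbed segments are now empty: point them at DONE's end. */
	while (--i > MODEL_DONE)
		cl->tails[i] = cl->tails[MODEL_DONE];
}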
1636 
1637 /*
1638  * Move and classify callbacks, but only if doing so won't require
1639  * that the RCU grace-period kthread be awakened.
1640  */
1641 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1642 						  struct rcu_data *rdp)
1643 {
1644 	rcu_lockdep_assert_cblist_protected(rdp);
1645 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1646 	    !raw_spin_trylock_rcu_node(rnp))
1647 		return;
1648 	WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1649 	raw_spin_unlock_rcu_node(rnp);
1650 }
1651 
1652 /*
1653  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1654  * quiescent state.  This is intended to be invoked when the CPU notices
1655  * a new grace period.
1656  */
1657 static void rcu_strict_gp_check_qs(void)
1658 {
1659 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1660 		rcu_read_lock();
1661 		rcu_read_unlock();
1662 	}
1663 }
1664 
1665 /*
1666  * Update CPU-local rcu_data state to record the beginnings and ends of
1667  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1668  * structure corresponding to the current CPU, and must have irqs disabled.
1669  * Returns true if the grace-period kthread needs to be awakened.
1670  */
1671 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1672 {
1673 	bool ret = false;
1674 	bool need_qs;
1675 	const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
1676 
1677 	raw_lockdep_assert_held_rcu_node(rnp);
1678 
1679 	if (rdp->gp_seq == rnp->gp_seq)
1680 		return false; /* Nothing to do. */
1681 
1682 	/* Handle the ends of any preceding grace periods first. */
1683 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1684 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1685 		if (!offloaded)
1686 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1687 		rdp->core_needs_qs = false;
1688 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1689 	} else {
1690 		if (!offloaded)
1691 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1692 		if (rdp->core_needs_qs)
1693 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1694 	}
1695 
1696 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1697 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1698 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1699 		/*
1700 		 * If the current grace period is waiting for this CPU,
1701 		 * set up to detect a quiescent state, otherwise don't
1702 		 * go looking for one.
1703 		 */
1704 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1705 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1706 		rdp->cpu_no_qs.b.norm = need_qs;
1707 		rdp->core_needs_qs = need_qs;
1708 		zero_cpu_stall_ticks(rdp);
1709 	}
1710 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1711 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1712 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1713 	WRITE_ONCE(rdp->gpwrap, false);
1714 	rcu_gpnum_ovf(rnp, rdp);
1715 	return ret;
1716 }
1717 
1718 static void note_gp_changes(struct rcu_data *rdp)
1719 {
1720 	unsigned long flags;
1721 	bool needwake;
1722 	struct rcu_node *rnp;
1723 
1724 	local_irq_save(flags);
1725 	rnp = rdp->mynode;
1726 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1727 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1728 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1729 		local_irq_restore(flags);
1730 		return;
1731 	}
1732 	needwake = __note_gp_changes(rnp, rdp);
1733 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1734 	rcu_strict_gp_check_qs();
1735 	if (needwake)
1736 		rcu_gp_kthread_wake();
1737 }
1738 
1739 static void rcu_gp_slow(int delay)
1740 {
1741 	if (delay > 0 &&
1742 	    !(rcu_seq_ctr(rcu_state.gp_seq) %
1743 	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1744 		schedule_timeout_idle(delay);
1745 }
1746 
1747 static unsigned long sleep_duration;
1748 
1749 /* Allow rcutorture to stall the grace-period kthread. */
1750 void rcu_gp_set_torture_wait(int duration)
1751 {
1752 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1753 		WRITE_ONCE(sleep_duration, duration);
1754 }
1755 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1756 
1757 /* Actually implement the aforementioned wait. */
1758 static void rcu_gp_torture_wait(void)
1759 {
1760 	unsigned long duration;
1761 
1762 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1763 		return;
1764 	duration = xchg(&sleep_duration, 0UL);
1765 	if (duration > 0) {
1766 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1767 		schedule_timeout_idle(duration);
1768 		pr_alert("%s: Wait complete\n", __func__);
1769 	}
1770 }
1771 
1772 /*
1773  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1774  * processing.
1775  */
1776 static void rcu_strict_gp_boundary(void *unused)
1777 {
1778 	invoke_rcu_core();
1779 }
1780 
1781 /*
1782  * Initialize a new grace period.  Return false if no grace period required.
1783  */
1784 static bool rcu_gp_init(void)
1785 {
1786 	unsigned long firstseq;
1787 	unsigned long flags;
1788 	unsigned long oldmask;
1789 	unsigned long mask;
1790 	struct rcu_data *rdp;
1791 	struct rcu_node *rnp = rcu_get_root();
1792 
1793 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1794 	raw_spin_lock_irq_rcu_node(rnp);
1795 	if (!READ_ONCE(rcu_state.gp_flags)) {
1796 		/* Spurious wakeup, tell caller to go back to sleep.  */
1797 		raw_spin_unlock_irq_rcu_node(rnp);
1798 		return false;
1799 	}
1800 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1801 
1802 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1803 		/*
1804 		 * Grace period already in progress, don't start another.
1805 		 * Not supposed to be able to happen.
1806 		 */
1807 		raw_spin_unlock_irq_rcu_node(rnp);
1808 		return false;
1809 	}
1810 
1811 	/* Advance to a new grace period and initialize state. */
1812 	record_gp_stall_check_time();
1813 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1814 	rcu_seq_start(&rcu_state.gp_seq);
1815 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1816 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1817 	raw_spin_unlock_irq_rcu_node(rnp);
1818 
1819 	/*
1820 	 * Apply per-leaf buffered online and offline operations to
1821 	 * the rcu_node tree. Note that this new grace period need not
1822 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1823 	 * offlining path, when combined with checks in this function,
1824 	 * will handle CPUs that are currently going offline or that will
1825  * go offline later.  Please also refer to the "Hotplug CPU" section
1826 	 * of RCU's Requirements documentation.
1827 	 */
1828 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1829 	rcu_for_each_leaf_node(rnp) {
1830 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1831 		firstseq = READ_ONCE(rnp->ofl_seq);
1832 		if (firstseq & 0x1)
1833 			while (firstseq == READ_ONCE(rnp->ofl_seq))
1834 				schedule_timeout_idle(1);  // Can't wake unless RCU is watching.
1835 		smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1836 		raw_spin_lock(&rcu_state.ofl_lock);
1837 		raw_spin_lock_irq_rcu_node(rnp);
1838 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1839 		    !rnp->wait_blkd_tasks) {
1840 			/* Nothing to do on this leaf rcu_node structure. */
1841 			raw_spin_unlock_irq_rcu_node(rnp);
1842 			raw_spin_unlock(&rcu_state.ofl_lock);
1843 			continue;
1844 		}
1845 
1846 		/* Record old state, apply changes to ->qsmaskinit field. */
1847 		oldmask = rnp->qsmaskinit;
1848 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1849 
1850 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1851 		if (!oldmask != !rnp->qsmaskinit) {
1852 			if (!oldmask) { /* First online CPU for rcu_node. */
1853 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1854 					rcu_init_new_rnp(rnp);
1855 			} else if (rcu_preempt_has_tasks(rnp)) {
1856 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1857 			} else { /* Last offline CPU and can propagate. */
1858 				rcu_cleanup_dead_rnp(rnp);
1859 			}
1860 		}
1861 
1862 		/*
1863 		 * If all waited-on tasks from prior grace period are
1864 		 * done, and if all this rcu_node structure's CPUs are
1865 		 * still offline, propagate up the rcu_node tree and
1866 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1867 		 * rcu_node structure's CPUs has since come back online,
1868 		 * simply clear ->wait_blkd_tasks.
1869 		 */
1870 		if (rnp->wait_blkd_tasks &&
1871 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1872 			rnp->wait_blkd_tasks = false;
1873 			if (!rnp->qsmaskinit)
1874 				rcu_cleanup_dead_rnp(rnp);
1875 		}
1876 
1877 		raw_spin_unlock_irq_rcu_node(rnp);
1878 		raw_spin_unlock(&rcu_state.ofl_lock);
1879 	}
1880 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1881 
1882 	/*
1883 	 * Set the quiescent-state-needed bits in all the rcu_node
1884 	 * structures for all currently online CPUs in breadth-first
1885 	 * order, starting from the root rcu_node structure, relying on the
1886 	 * layout of the tree within the rcu_state.node[] array.  Note that
1887 	 * other CPUs will access only the leaves of the hierarchy, thus
1888 	 * seeing that no grace period is in progress, at least until the
1889 	 * corresponding leaf node has been initialized.
1890 	 *
1891 	 * The grace period cannot complete until the initialization
1892 	 * process finishes, because this kthread handles both.
1893 	 */
1894 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1895 	rcu_for_each_node_breadth_first(rnp) {
1896 		rcu_gp_slow(gp_init_delay);
1897 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1898 		rdp = this_cpu_ptr(&rcu_data);
1899 		rcu_preempt_check_blocked_tasks(rnp);
1900 		rnp->qsmask = rnp->qsmaskinit;
1901 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1902 		if (rnp == rdp->mynode)
1903 			(void)__note_gp_changes(rnp, rdp);
1904 		rcu_preempt_boost_start_gp(rnp);
1905 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1906 					    rnp->level, rnp->grplo,
1907 					    rnp->grphi, rnp->qsmask);
1908 		/* Quiescent states for tasks on any now-offline CPUs. */
1909 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1910 		rnp->rcu_gp_init_mask = mask;
1911 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1912 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1913 		else
1914 			raw_spin_unlock_irq_rcu_node(rnp);
1915 		cond_resched_tasks_rcu_qs();
1916 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1917 	}
1918 
1919 	// If strict, make all CPUs aware of new grace period.
1920 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1921 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1922 
1923 	return true;
1924 }
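
/*
 * Editor's sketch (not part of the kernel source): the odd/even
 * ->ofl_seq handshake that the hotplug wait near the top of
 * rcu_gp_init() relies on, modeled with C11 atomics.  A writer makes
 * the counter odd around its update; a reader that samples an odd
 * value waits for the counter to move on, and so never acts on a
 * half-done update.  The "model_" names are hypothetical, and the
 * kernel sleeps (schedule_timeout_idle()) where this model spins.
 */
#include <stdatomic.h>

static _Atomic unsigned long model_ofl_seq;

static void model_offline_begin(void)
{
	atomic_fetch_add(&model_ofl_seq, 1);	/* Now odd: update in flight. */
}

static void model_offline_end(void)
{
	atomic_fetch_add(&model_ofl_seq, 1);	/* Even again: update done. */
}

static void model_wait_for_quiet(void)
{
	unsigned long first = atomic_load(&model_ofl_seq);

	if (first & 0x1)	/* Odd: an offline operation is in flight. */
		while (atomic_load(&model_ofl_seq) == first)
			;	/* Busy-wait; the kernel sleeps instead. */
}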
1925 
1926 /*
1927  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1928  * time.
1929  */
1930 static bool rcu_gp_fqs_check_wake(int *gfp)
1931 {
1932 	struct rcu_node *rnp = rcu_get_root();
1933 
1934 	// If under overload conditions, force an immediate FQS scan.
1935 	if (*gfp & RCU_GP_FLAG_OVLD)
1936 		return true;
1937 
1938 	// Someone like call_rcu() requested a force-quiescent-state scan.
1939 	*gfp = READ_ONCE(rcu_state.gp_flags);
1940 	if (*gfp & RCU_GP_FLAG_FQS)
1941 		return true;
1942 
1943 	// The current grace period has completed.
1944 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1945 		return true;
1946 
1947 	return false;
1948 }
1949 
1950 /*
1951  * Do one round of quiescent-state forcing.
1952  */
1953 static void rcu_gp_fqs(bool first_time)
1954 {
1955 	struct rcu_node *rnp = rcu_get_root();
1956 
1957 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1958 	rcu_state.n_force_qs++;
1959 	if (first_time) {
1960 		/* Collect dyntick-idle snapshots. */
1961 		force_qs_rnp(dyntick_save_progress_counter);
1962 	} else {
1963 		/* Handle dyntick-idle and offline CPUs. */
1964 		force_qs_rnp(rcu_implicit_dynticks_qs);
1965 	}
1966 	/* Clear flag to prevent immediate re-entry. */
1967 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1968 		raw_spin_lock_irq_rcu_node(rnp);
1969 		WRITE_ONCE(rcu_state.gp_flags,
1970 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1971 		raw_spin_unlock_irq_rcu_node(rnp);
1972 	}
1973 }
1974 
1975 /*
1976  * Loop doing repeated quiescent-state forcing until the grace period ends.
1977  */
1978 static void rcu_gp_fqs_loop(void)
1979 {
1980 	bool first_gp_fqs;
1981 	int gf = 0;
1982 	unsigned long j;
1983 	int ret;
1984 	struct rcu_node *rnp = rcu_get_root();
1985 
1986 	first_gp_fqs = true;
1987 	j = READ_ONCE(jiffies_till_first_fqs);
1988 	if (rcu_state.cbovld)
1989 		gf = RCU_GP_FLAG_OVLD;
1990 	ret = 0;
1991 	for (;;) {
1992 		if (!ret) {
1993 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1994 			/*
1995 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1996 			 * update; required for stall checks.
1997 			 */
1998 			smp_wmb();
1999 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
2000 				   jiffies + (j ? 3 * j : 2));
2001 		}
2002 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2003 				       TPS("fqswait"));
2004 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2005 		ret = swait_event_idle_timeout_exclusive(
2006 				rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
2007 		rcu_gp_torture_wait();
2008 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2009 		/* Locking provides needed memory barriers. */
2010 		/* If grace period done, leave loop. */
2011 		if (!READ_ONCE(rnp->qsmask) &&
2012 		    !rcu_preempt_blocked_readers_cgp(rnp))
2013 			break;
2014 		/* If time for quiescent-state forcing, do it. */
2015 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2016 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2017 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2018 					       TPS("fqsstart"));
2019 			rcu_gp_fqs(first_gp_fqs);
2020 			gf = 0;
2021 			if (first_gp_fqs) {
2022 				first_gp_fqs = false;
2023 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2024 			}
2025 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2026 					       TPS("fqsend"));
2027 			cond_resched_tasks_rcu_qs();
2028 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2029 			ret = 0; /* Force full wait till next FQS. */
2030 			j = READ_ONCE(jiffies_till_next_fqs);
2031 		} else {
2032 			/* Deal with stray signal. */
2033 			cond_resched_tasks_rcu_qs();
2034 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2035 			WARN_ON(signal_pending(current));
2036 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2037 					       TPS("fqswaitsig"));
2038 			ret = 1; /* Keep old FQS timing. */
2039 			j = jiffies;
2040 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
2041 				j = 1;
2042 			else
2043 				j = rcu_state.jiffies_force_qs - j;
2044 			gf = 0;
2045 		}
2046 	}
2047 }
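
/*
 * Editor's sketch (not part of the kernel source): how the loop above
 * recomputes its timeout after a spurious wakeup or stray signal,
 * rather than restarting the full wait.  The "model_" name is
 * hypothetical, "jiffies_now" stands in for the kernel's tick counter,
 * and the signed subtraction models time_after().
 */
static unsigned long model_requeue_wait(unsigned long jiffies_now,
					unsigned long jiffies_force_qs)
{
	if ((long)(jiffies_now - jiffies_force_qs) > 0)
		return 1;	/* FQS deadline passed: recheck at once. */
	return jiffies_force_qs - jiffies_now;	/* Sleep out the rest. */
}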
2048 
2049 /*
2050  * Clean up after the old grace period.
2051  */
2052 static void rcu_gp_cleanup(void)
2053 {
2054 	int cpu;
2055 	bool needgp = false;
2056 	unsigned long gp_duration;
2057 	unsigned long new_gp_seq;
2058 	bool offloaded;
2059 	struct rcu_data *rdp;
2060 	struct rcu_node *rnp = rcu_get_root();
2061 	struct swait_queue_head *sq;
2062 
2063 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
2064 	raw_spin_lock_irq_rcu_node(rnp);
2065 	rcu_state.gp_end = jiffies;
2066 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2067 	if (gp_duration > rcu_state.gp_max)
2068 		rcu_state.gp_max = gp_duration;
2069 
2070 	/*
2071 	 * We know the grace period is complete, but to everyone else
2072 	 * it appears to still be ongoing.  But it is also the case
2073 	 * that to everyone else it looks like there is nothing that
2074 	 * they can do to advance the grace period.  It is therefore
2075 	 * safe for us to drop the lock in order to mark the grace
2076 	 * period as completed in all of the rcu_node structures.
2077 	 */
2078 	raw_spin_unlock_irq_rcu_node(rnp);
2079 
2080 	/*
2081 	 * Propagate new ->gp_seq value to rcu_node structures so that
2082 	 * other CPUs don't have to wait until the start of the next grace
2083 	 * period to process their callbacks.  This also avoids some nasty
2084 	 * RCU grace-period initialization races by forcing the end of
2085 	 * the current grace period to be completely recorded in all of
2086 	 * the rcu_node structures before the beginning of the next grace
2087 	 * period is recorded in any of the rcu_node structures.
2088 	 */
2089 	new_gp_seq = rcu_state.gp_seq;
2090 	rcu_seq_end(&new_gp_seq);
2091 	rcu_for_each_node_breadth_first(rnp) {
2092 		raw_spin_lock_irq_rcu_node(rnp);
2093 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2094 			dump_blkd_tasks(rnp, 10);
2095 		WARN_ON_ONCE(rnp->qsmask);
2096 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2097 		rdp = this_cpu_ptr(&rcu_data);
2098 		if (rnp == rdp->mynode)
2099 			needgp = __note_gp_changes(rnp, rdp) || needgp;
2100 		/* smp_mb() provided by prior unlock-lock pair. */
2101 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
2102 		// Reset overload indication for CPUs no longer overloaded
2103 		if (rcu_is_leaf_node(rnp))
2104 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2105 				rdp = per_cpu_ptr(&rcu_data, cpu);
2106 				check_cb_ovld_locked(rdp, rnp);
2107 			}
2108 		sq = rcu_nocb_gp_get(rnp);
2109 		raw_spin_unlock_irq_rcu_node(rnp);
2110 		rcu_nocb_gp_cleanup(sq);
2111 		cond_resched_tasks_rcu_qs();
2112 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
2113 		rcu_gp_slow(gp_cleanup_delay);
2114 	}
2115 	rnp = rcu_get_root();
2116 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2117 
2118 	/* Declare grace period done, trace first to use old GP number. */
2119 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2120 	rcu_seq_end(&rcu_state.gp_seq);
2121 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2122 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2123 	/* Check for GP requests since above loop. */
2124 	rdp = this_cpu_ptr(&rcu_data);
2125 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2126 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2127 				  TPS("CleanupMore"));
2128 		needgp = true;
2129 	}
2130 	/* Advance CBs to reduce false positives below. */
2131 	offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
2132 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2133 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2134 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2135 		trace_rcu_grace_period(rcu_state.name,
2136 				       rcu_state.gp_seq,
2137 				       TPS("newreq"));
2138 	} else {
2139 		WRITE_ONCE(rcu_state.gp_flags,
2140 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2141 	}
2142 	raw_spin_unlock_irq_rcu_node(rnp);
2143 
2144 	// If strict, make all CPUs aware of the end of the old grace period.
2145 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2146 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2147 }
2148 
2149 /*
2150  * Body of kthread that handles grace periods.
2151  */
2152 static int __noreturn rcu_gp_kthread(void *unused)
2153 {
2154 	rcu_bind_gp_kthread();
2155 	for (;;) {
2156 
2157 		/* Handle grace-period start. */
2158 		for (;;) {
2159 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2160 					       TPS("reqwait"));
2161 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2162 			swait_event_idle_exclusive(rcu_state.gp_wq,
2163 					 READ_ONCE(rcu_state.gp_flags) &
2164 					 RCU_GP_FLAG_INIT);
2165 			rcu_gp_torture_wait();
2166 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2167 			/* Locking provides needed memory barrier. */
2168 			if (rcu_gp_init())
2169 				break;
2170 			cond_resched_tasks_rcu_qs();
2171 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
2172 			WARN_ON(signal_pending(current));
2173 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2174 					       TPS("reqwaitsig"));
2175 		}
2176 
2177 		/* Handle quiescent-state forcing. */
2178 		rcu_gp_fqs_loop();
2179 
2180 		/* Handle grace-period end. */
2181 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2182 		rcu_gp_cleanup();
2183 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2184 	}
2185 }
2186 
2187 /*
2188  * Report a full set of quiescent states to the rcu_state data structure.
2189  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2190  * another grace period is required.  Whether we wake the grace-period
2191  * kthread or it awakens itself for the next round of quiescent-state
2192  * forcing, that kthread will clean up after the just-completed grace
2193  * period.  Note that the caller must hold rnp->lock, which is released
2194  * before return.
2195  */
2196 static void rcu_report_qs_rsp(unsigned long flags)
2197 	__releases(rcu_get_root()->lock)
2198 {
2199 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
2200 	WARN_ON_ONCE(!rcu_gp_in_progress());
2201 	WRITE_ONCE(rcu_state.gp_flags,
2202 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2203 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2204 	rcu_gp_kthread_wake();
2205 }
2206 
2207 /*
2208  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2209  * Allows quiescent states for a group of CPUs to be reported at one go
2210  * to the specified rcu_node structure, though all the CPUs in the group
2211  * must be represented by the same rcu_node structure (which need not be a
2212  * leaf rcu_node structure, though it often will be).  The gps parameter
2213  * is the grace-period snapshot, which means that the quiescent states
2214  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2215  * must be held upon entry, and it is released before return.
2216  *
2217  * As a special case, if mask is zero, the bit-already-cleared check is
2218  * disabled.  This allows propagating quiescent state due to resumed tasks
2219  * during grace-period initialization.
2220  */
2221 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2222 			      unsigned long gps, unsigned long flags)
2223 	__releases(rnp->lock)
2224 {
2225 	unsigned long oldmask = 0;
2226 	struct rcu_node *rnp_c;
2227 
2228 	raw_lockdep_assert_held_rcu_node(rnp);
2229 
2230 	/* Walk up the rcu_node hierarchy. */
2231 	for (;;) {
2232 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2233 
2234 			/*
2235 			 * Our bit has already been cleared, or the
2236 			 * relevant grace period is already over, so done.
2237 			 */
2238 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2239 			return;
2240 		}
2241 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2242 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2243 			     rcu_preempt_blocked_readers_cgp(rnp));
2244 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2245 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2246 						 mask, rnp->qsmask, rnp->level,
2247 						 rnp->grplo, rnp->grphi,
2248 						 !!rnp->gp_tasks);
2249 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2250 
2251 			/* Other bits still set at this level, so done. */
2252 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2253 			return;
2254 		}
2255 		rnp->completedqs = rnp->gp_seq;
2256 		mask = rnp->grpmask;
2257 		if (rnp->parent == NULL) {
2258 
2259 			/* No more levels.  Exit loop holding root lock. */
2260 
2261 			break;
2262 		}
2263 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2264 		rnp_c = rnp;
2265 		rnp = rnp->parent;
2266 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2267 		oldmask = READ_ONCE(rnp_c->qsmask);
2268 	}
2269 
2270 	/*
2271 	 * Get here if we are the last CPU to pass through a quiescent
2272 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2273 	 * to clean up and start the next grace period if one is needed.
2274 	 */
2275 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2276 }
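
/*
 * Editor's sketch (not part of the kernel source): the upward
 * propagation implemented above, as a single-threaded model without
 * the locking, tracing, or blocked-reader checks.  Clearing the last
 * pending bit at one level clears this node's own bit at its parent;
 * emptying the root means the grace period may end.  The "model_"
 * names are hypothetical.
 */
struct model_rnp {
	unsigned long qsmask;	/* CPUs/children still owing a QS. */
	unsigned long grpmask;	/* This node's bit within its parent. */
	struct model_rnp *parent;
};

/* Returns 1 if this report completes the (modeled) grace period. */
static int model_report_qs(struct model_rnp *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask)
			return 0;	/* Others still pending at this level. */
		if (!rnp->parent)
			return 1;	/* Root emptied: GP may end. */
		mask = rnp->grpmask;	/* Clear our bit one level up. */
		rnp = rnp->parent;
	}
}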
2277 
2278 /*
2279  * Record a quiescent state for all tasks that were previously queued
2280  * on the specified rcu_node structure and that were blocking the current
2281  * RCU grace period.  The caller must hold the corresponding rnp->lock with
2282  * irqs disabled, and this lock is released upon return, but irqs remain
2283  * disabled.
2284  */
2285 static void __maybe_unused
2286 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2287 	__releases(rnp->lock)
2288 {
2289 	unsigned long gps;
2290 	unsigned long mask;
2291 	struct rcu_node *rnp_p;
2292 
2293 	raw_lockdep_assert_held_rcu_node(rnp);
2294 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2295 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2296 	    rnp->qsmask != 0) {
2297 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2298 		return;  /* Still need more quiescent states! */
2299 	}
2300 
2301 	rnp->completedqs = rnp->gp_seq;
2302 	rnp_p = rnp->parent;
2303 	if (rnp_p == NULL) {
2304 		/*
2305 		 * Only one rcu_node structure in the tree, so don't
2306 		 * try to report up to its nonexistent parent!
2307 		 */
2308 		rcu_report_qs_rsp(flags);
2309 		return;
2310 	}
2311 
2312 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2313 	gps = rnp->gp_seq;
2314 	mask = rnp->grpmask;
2315 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2316 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
2317 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2318 }
2319 
2320 /*
2321  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2322  * structure.  This must be called from the specified CPU.
2323  */
2324 static void
2325 rcu_report_qs_rdp(struct rcu_data *rdp)
2326 {
2327 	unsigned long flags;
2328 	unsigned long mask;
2329 	bool needwake = false;
2330 	const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
2331 	struct rcu_node *rnp;
2332 
2333 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2334 	rnp = rdp->mynode;
2335 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2336 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2337 	    rdp->gpwrap) {
2338 
2339 		/*
2340 		 * The grace period in which this quiescent state was
2341 		 * recorded has ended, so don't report it upwards.
2342 		 * We will instead need a new quiescent state that lies
2343 		 * within the current grace period.
2344 		 */
2345 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2346 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2347 		return;
2348 	}
2349 	mask = rdp->grpmask;
2350 	rdp->core_needs_qs = false;
2351 	if ((rnp->qsmask & mask) == 0) {
2352 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2353 	} else {
2354 		/*
2355 		 * This GP can't end until cpu checks in, so all of our
2356 		 * callbacks can be processed during the next GP.
2357 		 */
2358 		if (!offloaded)
2359 			needwake = rcu_accelerate_cbs(rnp, rdp);
2360 
2361 		rcu_disable_urgency_upon_qs(rdp);
2362 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2363 		/* ^^^ Released rnp->lock */
2364 		if (needwake)
2365 			rcu_gp_kthread_wake();
2366 	}
2367 }
2368 
2369 /*
2370  * Check to see if there is a new grace period of which this CPU
2371  * is not yet aware, and if so, set up local rcu_data state for it.
2372  * Otherwise, see if this CPU has just passed through its first
2373  * quiescent state for this grace period, and record that fact if so.
2374  */
2375 static void
2376 rcu_check_quiescent_state(struct rcu_data *rdp)
2377 {
2378 	/* Check for grace-period ends and beginnings. */
2379 	note_gp_changes(rdp);
2380 
2381 	/*
2382 	 * Does this CPU still need to do its part for current grace period?
2383 	 * If no, return and let the other CPUs do their part as well.
2384 	 */
2385 	if (!rdp->core_needs_qs)
2386 		return;
2387 
2388 	/*
2389 	 * Was there a quiescent state since the beginning of the grace
2390 	 * period? If no, then exit and wait for the next call.
2391 	 */
2392 	if (rdp->cpu_no_qs.b.norm)
2393 		return;
2394 
2395 	/*
2396 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2397 	 * judge of that).
2398 	 */
2399 	rcu_report_qs_rdp(rdp);
2400 }
2401 
2402 /*
2403  * Near the end of the offline process.  Trace the fact that this CPU
2404  * is going offline.
2405  */
2406 int rcutree_dying_cpu(unsigned int cpu)
2407 {
2408 	bool blkd;
2409 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2410 	struct rcu_node *rnp = rdp->mynode;
2411 
2412 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2413 		return 0;
2414 
2415 	blkd = !!(rnp->qsmask & rdp->grpmask);
2416 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2417 			       blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2418 	return 0;
2419 }
2420 
2421 /*
2422  * All CPUs for the specified rcu_node structure have gone offline,
2423  * and all tasks that were preempted within an RCU read-side critical
2424  * section while running on one of those CPUs have since exited their RCU
2425  * read-side critical section.  Some other CPU is reporting this fact with
2426  * the specified rcu_node structure's ->lock held and interrupts disabled.
2427  * This function therefore goes up the tree of rcu_node structures,
2428  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2429  * the leaf rcu_node structure's ->qsmaskinit field has already been
2430  * updated.
2431  *
2432  * This function does check that the specified rcu_node structure has
2433  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2434  * prematurely.  That said, invoking it after the fact will cost you
2435  * a needless lock acquisition.  So once it has done its work, don't
2436  * invoke it again.
2437  */
2438 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2439 {
2440 	long mask;
2441 	struct rcu_node *rnp = rnp_leaf;
2442 
2443 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2444 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2445 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2446 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2447 		return;
2448 	for (;;) {
2449 		mask = rnp->grpmask;
2450 		rnp = rnp->parent;
2451 		if (!rnp)
2452 			break;
2453 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2454 		rnp->qsmaskinit &= ~mask;
2455 		/* Between grace periods, so better already be zero! */
2456 		WARN_ON_ONCE(rnp->qsmask);
2457 		if (rnp->qsmaskinit) {
2458 			raw_spin_unlock_rcu_node(rnp);
2459 			/* irqs remain disabled. */
2460 			return;
2461 		}
2462 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2463 	}
2464 }
2465 
2466 /*
2467  * The CPU has been completely removed, and some other CPU is reporting
2468  * this fact from process context.  Do the remainder of the cleanup.
2469  * There can only be one CPU hotplug operation at a time, so no need for
2470  * explicit locking.
2471  */
2472 int rcutree_dead_cpu(unsigned int cpu)
2473 {
2474 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2475 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2476 
2477 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2478 		return 0;
2479 
2480 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2481 	/* Adjust any no-longer-needed kthreads. */
2482 	rcu_boost_kthread_setaffinity(rnp, -1);
2483 	/* Do any needed no-CB deferred wakeups from this CPU. */
2484 	do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2485 
2486 	// Stop-machine done, so allow nohz_full to disable tick.
2487 	tick_dep_clear(TICK_DEP_BIT_RCU);
2488 	return 0;
2489 }
2490 
2491 /*
2492  * Invoke any RCU callbacks that have made it to the end of their grace
2493  * period.  Throttle as specified by rdp->blimit.
2494  */
2495 static void rcu_do_batch(struct rcu_data *rdp)
2496 {
2497 	int div;
2498 	bool __maybe_unused empty;
2499 	unsigned long flags;
2500 	const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
2501 	struct rcu_head *rhp;
2502 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2503 	long bl, count = 0;
2504 	long pending, tlimit = 0;
2505 
2506 	/* If no callbacks are ready, just return. */
2507 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2508 		trace_rcu_batch_start(rcu_state.name,
2509 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2510 		trace_rcu_batch_end(rcu_state.name, 0,
2511 				    !rcu_segcblist_empty(&rdp->cblist),
2512 				    need_resched(), is_idle_task(current),
2513 				    rcu_is_callbacks_kthread());
2514 		return;
2515 	}
2516 
2517 	/*
2518 	 * Extract the list of ready callbacks, disabling to prevent
2519 	 * races with call_rcu() from interrupt handlers.  Leave the
2520 	 * callback counts, as rcu_barrier() needs to be conservative.
2521 	 */
2522 	local_irq_save(flags);
2523 	rcu_nocb_lock(rdp);
2524 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2525 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2526 	div = READ_ONCE(rcu_divisor);
2527 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2528 	bl = max(rdp->blimit, pending >> div);
2529 	if (unlikely(bl > 100)) {
2530 		long rrn = READ_ONCE(rcu_resched_ns);
2531 
2532 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2533 		tlimit = local_clock() + rrn;
2534 	}
2535 	trace_rcu_batch_start(rcu_state.name,
2536 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2537 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2538 	if (offloaded)
2539 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2540 
2541 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2542 	rcu_nocb_unlock_irqrestore(rdp, flags);
2543 
2544 	/* Invoke callbacks. */
2545 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2546 	rhp = rcu_cblist_dequeue(&rcl);
2547 
2548 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2549 		rcu_callback_t f;
2550 
2551 		count++;
2552 		debug_rcu_head_unqueue(rhp);
2553 
2554 		rcu_lock_acquire(&rcu_callback_map);
2555 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2556 
2557 		f = rhp->func;
2558 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2559 		f(rhp);
2560 
2561 		rcu_lock_release(&rcu_callback_map);
2562 
2563 		/*
2564 		 * Stop only if limit reached and CPU has something to do.
2565 		 */
2566 		if (count >= bl && !offloaded &&
2567 		    (need_resched() ||
2568 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2569 			break;
2570 		if (unlikely(tlimit)) {
2571 			/* only call local_clock() every 32 callbacks */
2572 			if (likely((count & 31) || local_clock() < tlimit))
2573 				continue;
2574 			/* Exceeded the time limit, so leave. */
2575 			break;
2576 		}
2577 		if (!in_serving_softirq()) {
2578 			local_bh_enable();
2579 			lockdep_assert_irqs_enabled();
2580 			cond_resched_tasks_rcu_qs();
2581 			lockdep_assert_irqs_enabled();
2582 			local_bh_disable();
2583 		}
2584 	}
2585 
2586 	local_irq_save(flags);
2587 	rcu_nocb_lock(rdp);
2588 	rdp->n_cbs_invoked += count;
2589 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2590 			    is_idle_task(current), rcu_is_callbacks_kthread());
2591 
2592 	/* Update counts and requeue any remaining callbacks. */
2593 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2594 	rcu_segcblist_add_len(&rdp->cblist, -count);
2595 
2596 	/* Reinstate batch limit if we have worked down the excess. */
2597 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2598 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2599 		rdp->blimit = blimit;
2600 
2601 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2602 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2603 		rdp->qlen_last_fqs_check = 0;
2604 		rdp->n_force_qs_snap = rcu_state.n_force_qs;
2605 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2606 		rdp->qlen_last_fqs_check = count;
2607 
2608 	/*
2609 	 * The following usually indicates a double call_rcu().  To track
2610 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2611 	 */
2612 	empty = rcu_segcblist_empty(&rdp->cblist);
2613 	WARN_ON_ONCE(count == 0 && !empty);
2614 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2615 		     count != 0 && empty);
2616 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2617 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2618 
2619 	rcu_nocb_unlock_irqrestore(rdp, flags);
2620 
2621 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2622 	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2623 		invoke_rcu_core();
2624 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2625 }
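
/*
 * Editor's sketch (not part of the kernel source): the batch-limit
 * computation at the top of rcu_do_batch(), pulled out into a
 * standalone helper.  The "model_" name is hypothetical; the clamp
 * bounds mirror the in-line ternaries above.
 */
static long model_batch_limit(long blimit, long pending, int div)
{
	/* Clamp the divisor shift to [0, BITS_PER_LONG - 2], default 7. */
	if (div < 0)
		div = 7;
	else if (div > (int)(sizeof(long) * 8 - 2))
		div = (int)(sizeof(long) * 8 - 2);
	/* Honor blimit, but scale the batch up with a large backlog. */
	return pending >> div > blimit ? pending >> div : blimit;
}

/* Example: blimit = 10, pending = 12800, div = 7 gives a batch of 100. */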
2626 
2627 /*
2628  * This function is invoked from each scheduling-clock interrupt,
2629  * and checks to see if this CPU is in a non-context-switch quiescent
2630  * state, for example, user mode or idle loop.  It also schedules RCU
2631  * core processing.  If the current grace period has gone on too long,
2632  * it will ask the scheduler to manufacture a context switch for the sole
2633  * purpose of providing the needed quiescent state.
2634  */
2635 void rcu_sched_clock_irq(int user)
2636 {
2637 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2638 	lockdep_assert_irqs_disabled();
2639 	raw_cpu_inc(rcu_data.ticks_this_gp);
2640 	/* The load-acquire pairs with the store-release setting to true. */
2641 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2642 		/* Idle and userspace execution already are quiescent states. */
2643 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2644 			set_tsk_need_resched(current);
2645 			set_preempt_need_resched();
2646 		}
2647 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2648 	}
2649 	rcu_flavor_sched_clock_irq(user);
2650 	if (rcu_pending(user))
2651 		invoke_rcu_core();
2652 	lockdep_assert_irqs_disabled();
2653 
2654 	trace_rcu_utilization(TPS("End scheduler-tick"));
2655 }
2656 
2657 /*
2658  * Scan the leaf rcu_node structures.  For each structure on which all
2659  * CPUs have reported a quiescent state and on which there are tasks
2660  * blocking the current grace period, initiate RCU priority boosting.
2661  * Otherwise, invoke the specified function to check dyntick state for
2662  * each CPU that has not yet reported a quiescent state.
2663  */
2664 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2665 {
2666 	int cpu;
2667 	unsigned long flags;
2668 	unsigned long mask;
2669 	struct rcu_data *rdp;
2670 	struct rcu_node *rnp;
2671 
2672 	rcu_state.cbovld = rcu_state.cbovldnext;
2673 	rcu_state.cbovldnext = false;
2674 	rcu_for_each_leaf_node(rnp) {
2675 		cond_resched_tasks_rcu_qs();
2676 		mask = 0;
2677 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2678 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2679 		if (rnp->qsmask == 0) {
2680 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2681 				/*
2682 				 * No point in scanning bits because they
2683 				 * are all zero.  But we might need to
2684 				 * priority-boost blocked readers.
2685 				 */
2686 				rcu_initiate_boost(rnp, flags);
2687 				/* rcu_initiate_boost() releases rnp->lock */
2688 				continue;
2689 			}
2690 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2691 			continue;
2692 		}
2693 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2694 			rdp = per_cpu_ptr(&rcu_data, cpu);
2695 			if (f(rdp)) {
2696 				mask |= rdp->grpmask;
2697 				rcu_disable_urgency_upon_qs(rdp);
2698 			}
2699 		}
2700 		if (mask != 0) {
2701 			/* Idle/offline CPUs, report (releases rnp->lock). */
2702 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2703 		} else {
2704 			/* Nothing to do here, so just drop the lock. */
2705 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2706 		}
2707 	}
2708 }
2709 
2710 /*
2711  * Force quiescent states on reluctant CPUs, and also detect which
2712  * CPUs are in dyntick-idle mode.
2713  */
2714 void rcu_force_quiescent_state(void)
2715 {
2716 	unsigned long flags;
2717 	bool ret;
2718 	struct rcu_node *rnp;
2719 	struct rcu_node *rnp_old = NULL;
2720 
2721 	/* Funnel through hierarchy to reduce memory contention. */
2722 	rnp = __this_cpu_read(rcu_data.mynode);
2723 	for (; rnp != NULL; rnp = rnp->parent) {
2724 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2725 		       !raw_spin_trylock(&rnp->fqslock);
2726 		if (rnp_old != NULL)
2727 			raw_spin_unlock(&rnp_old->fqslock);
2728 		if (ret)
2729 			return;
2730 		rnp_old = rnp;
2731 	}
2732 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2733 
2734 	/* Reached the root of the rcu_node tree, acquire lock. */
2735 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2736 	raw_spin_unlock(&rnp_old->fqslock);
2737 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2738 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2739 		return;  /* Someone beat us to it. */
2740 	}
2741 	WRITE_ONCE(rcu_state.gp_flags,
2742 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2743 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2744 	rcu_gp_kthread_wake();
2745 }
2746 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
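
/*
 * Editor's sketch (not part of the kernel source): the trylock funnel
 * above, reusing struct model_node from the funnel-locking sketch
 * after rcu_start_this_gp().  Contention on any level's lock means
 * some other CPU is already pushing an FQS request toward the root,
 * so this caller simply drops out rather than waiting.  The "model_"
 * name is hypothetical, and the real code's RCU_GP_FLAG_FQS checks
 * and flag update under the root rcu_node lock are elided.
 */
static int model_fqs_funnel(struct model_node *leaf)
{
	struct model_node *np, *prev = NULL;

	for (np = leaf; np; np = np->parent) {
		int busy = pthread_mutex_trylock(&np->lock) != 0;

		if (prev)
			pthread_mutex_unlock(&prev->lock);
		if (busy)
			return 0;	/* Someone else is funneling up. */
		prev = np;
	}
	pthread_mutex_unlock(&prev->lock);	/* prev is the root. */
	return 1;	/* This caller gets to request the FQS. */
}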
2747 
2748 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2749 // grace periods.
2750 static void strict_work_handler(struct work_struct *work)
2751 {
2752 	rcu_read_lock();
2753 	rcu_read_unlock();
2754 }
2755 
2756 /* Perform RCU core processing work for the current CPU.  */
2757 static __latent_entropy void rcu_core(void)
2758 {
2759 	unsigned long flags;
2760 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2761 	struct rcu_node *rnp = rdp->mynode;
2762 	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2763 
2764 	if (cpu_is_offline(smp_processor_id()))
2765 		return;
2766 	trace_rcu_utilization(TPS("Start RCU core"));
2767 	WARN_ON_ONCE(!rdp->beenonline);
2768 
2769 	/* Report any deferred quiescent states if preemption enabled. */
2770 	if (!(preempt_count() & PREEMPT_MASK)) {
2771 		rcu_preempt_deferred_qs(current);
2772 	} else if (rcu_preempt_need_deferred_qs(current)) {
2773 		set_tsk_need_resched(current);
2774 		set_preempt_need_resched();
2775 	}
2776 
2777 	/* Update RCU state based on any recent quiescent states. */
2778 	rcu_check_quiescent_state(rdp);
2779 
2780 	/* No grace period and unregistered callbacks? */
2781 	if (!rcu_gp_in_progress() &&
2782 	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2783 		rcu_nocb_lock_irqsave(rdp, flags);
2784 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2785 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2786 		rcu_nocb_unlock_irqrestore(rdp, flags);
2787 	}
2788 
2789 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2790 
2791 	/* If there are callbacks ready, invoke them. */
2792 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2793 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
2794 		rcu_do_batch(rdp);
2795 
2796 	/* Do any needed deferred wakeups of rcuo kthreads. */
2797 	do_nocb_deferred_wakeup(rdp);
2798 	trace_rcu_utilization(TPS("End RCU core"));
2799 
2800 	// If strict GPs, schedule an RCU reader in a clean environment.
2801 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2802 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2803 }
2804 
2805 static void rcu_core_si(struct softirq_action *h)
2806 {
2807 	rcu_core();
2808 }
2809 
2810 static void rcu_wake_cond(struct task_struct *t, int status)
2811 {
2812 	/*
2813 	 * If the thread is yielding, only wake it when this
2814	 * is invoked from idle.
2815 	 */
2816 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2817 		wake_up_process(t);
2818 }
2819 
2820 static void invoke_rcu_core_kthread(void)
2821 {
2822 	struct task_struct *t;
2823 	unsigned long flags;
2824 
2825 	local_irq_save(flags);
2826 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2827 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2828 	if (t != NULL && t != current)
2829 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2830 	local_irq_restore(flags);
2831 }
2832 
2833 /*
2834  * Wake up this CPU's rcuc kthread to do RCU core processing.
2835  */
2836 static void invoke_rcu_core(void)
2837 {
2838 	if (!cpu_online(smp_processor_id()))
2839 		return;
2840 	if (use_softirq)
2841 		raise_softirq(RCU_SOFTIRQ);
2842 	else
2843 		invoke_rcu_core_kthread();
2844 }
2845 
2846 static void rcu_cpu_kthread_park(unsigned int cpu)
2847 {
2848 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2849 }
2850 
2851 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2852 {
2853 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2854 }
2855 
2856 /*
2857  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2858  * the RCU softirq used in configurations of RCU that do not support RCU
2859  * priority boosting.
2860  */
2861 static void rcu_cpu_kthread(unsigned int cpu)
2862 {
2863 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2864 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2865 	int spincnt;
2866 
2867 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2868 	for (spincnt = 0; spincnt < 10; spincnt++) {
2869 		local_bh_disable();
2870 		*statusp = RCU_KTHREAD_RUNNING;
2871 		local_irq_disable();
2872 		work = *workp;
2873 		*workp = 0;
2874 		local_irq_enable();
2875 		if (work)
2876 			rcu_core();
2877 		local_bh_enable();
2878 		if (*workp == 0) {
2879 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2880 			*statusp = RCU_KTHREAD_WAITING;
2881 			return;
2882 		}
2883 	}
2884 	*statusp = RCU_KTHREAD_YIELDING;
2885 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2886 	schedule_timeout_idle(2);
2887 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2888 	*statusp = RCU_KTHREAD_WAITING;
2889 }
2890 
2891 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2892 	.store			= &rcu_data.rcu_cpu_kthread_task,
2893 	.thread_should_run	= rcu_cpu_kthread_should_run,
2894 	.thread_fn		= rcu_cpu_kthread,
2895 	.thread_comm		= "rcuc/%u",
2896 	.setup			= rcu_cpu_kthread_setup,
2897 	.park			= rcu_cpu_kthread_park,
2898 };
2899 
2900 /*
2901  * Spawn per-CPU RCU core processing kthreads.
2902  */
2903 static int __init rcu_spawn_core_kthreads(void)
2904 {
2905 	int cpu;
2906 
2907 	for_each_possible_cpu(cpu)
2908 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2909 	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2910 		return 0;
2911 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2912 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2913 	return 0;
2914 }
2915 early_initcall(rcu_spawn_core_kthreads);
2916 
2917 /*
2918  * Handle any core-RCU processing required by a call_rcu() invocation.
2919  */
2920 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2921 			    unsigned long flags)
2922 {
2923 	/*
2924 	 * If called from an extended quiescent state, invoke the RCU
2925 	 * core in order to force a re-evaluation of RCU's idleness.
2926 	 */
2927 	if (!rcu_is_watching())
2928 		invoke_rcu_core();
2929 
2930 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2931 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2932 		return;
2933 
2934 	/*
2935 	 * Force the grace period if too many callbacks or too long waiting.
2936 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2937 	 * if some other CPU has recently done so.  Also, don't bother
2938 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2939 	 * is the only one waiting for a grace period to complete.
2940 	 */
2941 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2942 		     rdp->qlen_last_fqs_check + qhimark)) {
2943 
2944 		/* Are we ignoring a completed grace period? */
2945 		note_gp_changes(rdp);
2946 
2947 		/* Start a new grace period if one not already started. */
2948 		if (!rcu_gp_in_progress()) {
2949 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2950 		} else {
2951 			/* Give the grace period a kick. */
2952 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2953 			if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2954 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2955 				rcu_force_quiescent_state();
2956 			rdp->n_force_qs_snap = rcu_state.n_force_qs;
2957 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2958 		}
2959 	}
2960 }
2961 
2962 /*
2963  * RCU callback function to leak a callback.
2964  */
2965 static void rcu_leak_callback(struct rcu_head *rhp)
2966 {
2967 }
2968 
2969 /*
2970  * Check and if necessary update the leaf rcu_node structure's
2971  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2972  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2973  * structure's ->lock.
2974  */
2975 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2976 {
2977 	raw_lockdep_assert_held_rcu_node(rnp);
2978 	if (qovld_calc <= 0)
2979 		return; // Early boot and wildcard value set.
2980 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2981 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2982 	else
2983 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2984 }
2985 
2986 /*
2987  * Check and if necessary update the leaf rcu_node structure's
2988  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2989  * number of queued RCU callbacks.  No locks need be held, but the
2990  * caller must have disabled interrupts.
2991  *
2992  * Note that this function ignores the possibility that there are a lot
2993  * of callbacks all of which have already seen the end of their respective
2994  * grace periods.  This omission is due to the need for no-CBs CPUs to
2995  * be holding ->nocb_lock to do this check, which is too heavy for a
2996  * common-case operation.
2997  */
2998 static void check_cb_ovld(struct rcu_data *rdp)
2999 {
3000 	struct rcu_node *const rnp = rdp->mynode;
3001 
3002 	if (qovld_calc <= 0 ||
3003 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3004 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3005 		return; // Early boot wildcard value or already set correctly.
3006 	raw_spin_lock_rcu_node(rnp);
3007 	check_cb_ovld_locked(rdp, rnp);
3008 	raw_spin_unlock_rcu_node(rnp);
3009 }
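
/*
 * Editor's sketch (not part of the kernel source): the lockless fast
 * path in check_cb_ovld() above boils down to comparing two booleans,
 * "is this CPU over the limit?" versus "is its overload bit already
 * set?", and taking the rcu_node lock only when they disagree.  The
 * "model_" name is hypothetical.
 */
static int model_need_ovld_update(long n_cbs, long qovld,
				  unsigned long cbovldmask,
				  unsigned long grpmask)
{
	return (n_cbs >= qovld) != !!(cbovldmask & grpmask);
}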
3010 
3011 /* Helper function for call_rcu() and friends.  */
3012 static void
3013 __call_rcu(struct rcu_head *head, rcu_callback_t func)
3014 {
3015 	static atomic_t doublefrees;
3016 	unsigned long flags;
3017 	struct rcu_data *rdp;
3018 	bool was_alldone;
3019 
3020 	/* Misaligned rcu_head! */
3021 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3022 
3023 	if (debug_rcu_head_queue(head)) {
3024 		/*
3025 		 * Probable double call_rcu(), so leak the callback.
3026 		 * Use rcu:rcu_callback trace event to find the previous
3027 		 * time callback was passed to __call_rcu().
3028 		 */
3029 		if (atomic_inc_return(&doublefrees) < 4) {
3030 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
3031 			mem_dump_obj(head);
3032 		}
3033 		WRITE_ONCE(head->func, rcu_leak_callback);
3034 		return;
3035 	}
3036 	head->func = func;
3037 	head->next = NULL;
3038 	local_irq_save(flags);
3039 	kasan_record_aux_stack(head);
3040 	rdp = this_cpu_ptr(&rcu_data);
3041 
3042 	/* Add the callback to our list. */
3043 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3044 		// This can trigger due to call_rcu() from offline CPU:
3045 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3046 		WARN_ON_ONCE(!rcu_is_watching());
3047 		// Very early boot, before rcu_init().  Initialize if needed
3048 		// and then drop through to queue the callback.
3049 		if (rcu_segcblist_empty(&rdp->cblist))
3050 			rcu_segcblist_init(&rdp->cblist);
3051 	}
3052 
3053 	check_cb_ovld(rdp);
3054 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
3055 		return; // Enqueued onto ->nocb_bypass, so just leave.
3056 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
3057 	rcu_segcblist_enqueue(&rdp->cblist, head);
3058 	if (__is_kvfree_rcu_offset((unsigned long)func))
3059 		trace_rcu_kvfree_callback(rcu_state.name, head,
3060 					 (unsigned long)func,
3061 					 rcu_segcblist_n_cbs(&rdp->cblist));
3062 	else
3063 		trace_rcu_callback(rcu_state.name, head,
3064 				   rcu_segcblist_n_cbs(&rdp->cblist));
3065 
3066 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
3067 
3068 	/* Go handle any RCU core processing required. */
3069 	if (unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
3070 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3071 	} else {
3072 		__call_rcu_core(rdp, head, flags);
3073 		local_irq_restore(flags);
3074 	}
3075 }
3076 
3077 /**
3078  * call_rcu() - Queue an RCU callback for invocation after a grace period.
3079  * @head: structure to be used for queueing the RCU updates.
3080  * @func: actual callback function to be invoked after the grace period
3081  *
3082  * The callback function will be invoked some time after a full grace
3083  * period elapses, in other words after all pre-existing RCU read-side
3084  * critical sections have completed.  However, the callback function
3085  * might well execute concurrently with RCU read-side critical sections
3086  * that started after call_rcu() was invoked.  RCU read-side critical
3087  * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
3088  * may be nested.  In addition, regions of code across which interrupts,
3089  * preemption, or softirqs have been disabled also serve as RCU read-side
3090  * critical sections.  This includes hardware interrupt handlers, softirq
3091  * handlers, and NMI handlers.
3092  *
3093  * Note that all CPUs must agree that the grace period extended beyond
3094  * all pre-existing RCU read-side critical sections.  On systems with more
3095  * than one CPU, this means that when "func()" is invoked, each CPU is
3096  * guaranteed to have executed a full memory barrier since the end of its
3097  * last RCU read-side critical section whose beginning preceded the call
3098  * to call_rcu().  It also means that each CPU executing an RCU read-side
3099  * critical section that continues beyond the start of "func()" must have
3100  * executed a memory barrier after the call_rcu() but before the beginning
3101  * of that RCU read-side critical section.  Note that these guarantees
3102  * include CPUs that are offline, idle, or executing in user mode, as
3103  * well as CPUs that are executing in the kernel.
3104  *
3105  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3106  * resulting RCU callback function "func()", then both CPU A and CPU B are
3107  * guaranteed to execute a full memory barrier during the time interval
3108  * between the call to call_rcu() and the invocation of "func()" -- even
3109  * if CPU A and CPU B are the same CPU (but again only if the system has
3110  * more than one CPU).
3111  */
3112 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3113 {
3114 	__call_rcu(head, func);
3115 }
3116 EXPORT_SYMBOL_GPL(call_rcu);
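/*
 * Editor's note: a minimal usage sketch of call_rcu(), not part of the
 * original file.  The "struct foo" and its helpers are hypothetical
 * caller-side code illustrating the embedded-rcu_head pattern described
 * above; the #if 0 guard keeps the sketch from ever being compiled.
 */
#if 0
struct foo {
	struct rcu_head rh;
	int data;
};

/* Invoked some time after a grace period; safe to free the object. */
static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);
}

/* Updater: unlink fp from all RCU-protected structures, then: */
static void foo_remove(struct foo *fp)
{
	call_rcu(&fp->rh, foo_reclaim);
}
#endif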
3117 
3118 
3119 /* Maximum number of jiffies to wait before draining a batch. */
3120 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3121 #define KFREE_N_BATCHES 2
3122 #define FREE_N_CHANNELS 2
3123 
3124 /**
3125  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3126  * @nr_records: Number of active pointers in the array
3127  * @next: Next bulk object in the block chain
3128  * @records: Array of the kvfree_rcu() pointers
3129  */
3130 struct kvfree_rcu_bulk_data {
3131 	unsigned long nr_records;
3132 	struct kvfree_rcu_bulk_data *next;
3133 	void *records[];
3134 };
3135 
3136 /*
3137  * This macro defines how many entries the "records" array
3138  * will contain.  It is chosen so that the size of the
3139  * kvfree_rcu_bulk_data structure is exactly one page.
3140  */
3141 #define KVFREE_BULK_MAX_ENTR \
3142 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
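/*
 * Editor's note: a worked example of the sizing above, assuming a
 * 4096-byte PAGE_SIZE, 8-byte pointers, and the resulting 16-byte
 * kvfree_rcu_bulk_data header:  (4096 - 16) / 8 = 510 pointers per block.
 */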
3143 
3144 /**
3145  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3146  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3147  * @head_free: List of kfree_rcu() objects waiting for a grace period
3148  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3149  * @krcp: Pointer to @kfree_rcu_cpu structure
3150  */
3151 
3152 struct kfree_rcu_cpu_work {
3153 	struct rcu_work rcu_work;
3154 	struct rcu_head *head_free;
3155 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3156 	struct kfree_rcu_cpu *krcp;
3157 };
3158 
3159 /**
3160  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3161  * @head: List of kfree_rcu() objects not yet waiting for a grace period
3162  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3163  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3164  * @lock: Synchronize access to this structure
3165  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3166  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3167  * @initialized: The @rcu_work fields have been initialized
3168  * @count: Number of objects for which GP not started
3169  * @bkvcache:
3170  *	A simple cache list that contains objects for reuse.
3171  *	In order to save some per-CPU space the list is singly linked.
3172  *	Even though the list itself is lockless, accesses to it must be
3173  *	protected by the per-CPU lock.
3174  * @page_cache_work: A work to refill the cache when it is empty
3175  * @work_in_progress: Indicates that page_cache_work is running
3176  * @hrtimer: A hrtimer for scheduling a page_cache_work
3177  * @nr_bkv_objs: number of allocated objects in @bkvcache.
3178  *
3179  * This is a per-CPU structure.  The reason that it is not included in
3180  * the rcu_data structure is to permit this code to be extracted from
3181  * the RCU files.  Such extraction could allow further optimization of
3182  * the interactions with the slab allocators.
3183  */
3184 struct kfree_rcu_cpu {
3185 	struct rcu_head *head;
3186 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3187 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3188 	raw_spinlock_t lock;
3189 	struct delayed_work monitor_work;
3190 	bool monitor_todo;
3191 	bool initialized;
3192 	int count;
3193 
3194 	struct work_struct page_cache_work;
3195 	atomic_t work_in_progress;
3196 	struct hrtimer hrtimer;
3197 
3198 	struct llist_head bkvcache;
3199 	int nr_bkv_objs;
3200 };
3201 
3202 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3203 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3204 };
3205 
3206 static __always_inline void
3207 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3208 {
3209 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3210 	int i;
3211 
3212 	for (i = 0; i < bhead->nr_records; i++)
3213 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3214 #endif
3215 }
3216 
3217 static inline struct kfree_rcu_cpu *
3218 krc_this_cpu_lock(unsigned long *flags)
3219 {
3220 	struct kfree_rcu_cpu *krcp;
3221 
3222 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
3223 	krcp = this_cpu_ptr(&krc);
3224 	raw_spin_lock(&krcp->lock);
3225 
3226 	return krcp;
3227 }
3228 
3229 static inline void
3230 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3231 {
3232 	raw_spin_unlock(&krcp->lock);
3233 	local_irq_restore(flags);
3234 }
3235 
3236 static inline struct kvfree_rcu_bulk_data *
3237 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3238 {
3239 	if (!krcp->nr_bkv_objs)
3240 		return NULL;
3241 
3242 	krcp->nr_bkv_objs--;
3243 	return (struct kvfree_rcu_bulk_data *)
3244 		llist_del_first(&krcp->bkvcache);
3245 }
3246 
3247 static inline bool
3248 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3249 	struct kvfree_rcu_bulk_data *bnode)
3250 {
3251 	// Check the limit.
3252 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3253 		return false;
3254 
3255 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3256 	krcp->nr_bkv_objs++;
3257 	return true;
3258 
3259 }
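/*
 * Editor's note: an illustrative (hypothetical) caller of the cache
 * helpers above, showing that both must run under krc_this_cpu_lock().
 * Not part of the original file; the #if 0 guard keeps it uncompiled.
 */
#if 0
static struct kvfree_rcu_bulk_data *example_pop_bnode(void)
{
	struct kvfree_rcu_bulk_data *bnode;
	struct kfree_rcu_cpu *krcp;
	unsigned long flags;

	krcp = krc_this_cpu_lock(&flags);	/* Both helpers need this. */
	bnode = get_cached_bnode(krcp);		/* NULL if cache is empty. */
	krc_this_cpu_unlock(krcp, flags);

	return bnode;
}
#endif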
3260 
3261 /*
3262  * This function is invoked in workqueue context after a grace period.
3263  * It frees all the objects queued on ->bkvhead_free or ->head_free.
3264  */
3265 static void kfree_rcu_work(struct work_struct *work)
3266 {
3267 	unsigned long flags;
3268 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3269 	struct rcu_head *head, *next;
3270 	struct kfree_rcu_cpu *krcp;
3271 	struct kfree_rcu_cpu_work *krwp;
3272 	int i, j;
3273 
3274 	krwp = container_of(to_rcu_work(work),
3275 			    struct kfree_rcu_cpu_work, rcu_work);
3276 	krcp = krwp->krcp;
3277 
3278 	raw_spin_lock_irqsave(&krcp->lock, flags);
3279 	// Channels 1 and 2.
3280 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3281 		bkvhead[i] = krwp->bkvhead_free[i];
3282 		krwp->bkvhead_free[i] = NULL;
3283 	}
3284 
3285 	// Channel 3.
3286 	head = krwp->head_free;
3287 	krwp->head_free = NULL;
3288 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3289 
3290 	// Handle the first two channels.
3291 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3292 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3293 			bnext = bkvhead[i]->next;
3294 			debug_rcu_bhead_unqueue(bkvhead[i]);
3295 
3296 			rcu_lock_acquire(&rcu_callback_map);
3297 			if (i == 0) { // kmalloc() / kfree().
3298 				trace_rcu_invoke_kfree_bulk_callback(
3299 					rcu_state.name, bkvhead[i]->nr_records,
3300 					bkvhead[i]->records);
3301 
3302 				kfree_bulk(bkvhead[i]->nr_records,
3303 					bkvhead[i]->records);
3304 			} else { // vmalloc() / vfree().
3305 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3306 					trace_rcu_invoke_kvfree_callback(
3307 						rcu_state.name,
3308 						bkvhead[i]->records[j], 0);
3309 
3310 					vfree(bkvhead[i]->records[j]);
3311 				}
3312 			}
3313 			rcu_lock_release(&rcu_callback_map);
3314 
3315 			raw_spin_lock_irqsave(&krcp->lock, flags);
3316 			if (put_cached_bnode(krcp, bkvhead[i]))
3317 				bkvhead[i] = NULL;
3318 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3319 
3320 			if (bkvhead[i])
3321 				free_page((unsigned long) bkvhead[i]);
3322 
3323 			cond_resched_tasks_rcu_qs();
3324 		}
3325 	}
3326 
3327 	/*
3328 	 * Emergency case only.  It can happen under low-memory
3329 	 * conditions, when an allocation fails and the "bulk"
3330 	 * path therefore cannot temporarily be used.
3331 	 */
3332 	for (; head; head = next) {
3333 		unsigned long offset = (unsigned long)head->func;
3334 		void *ptr = (void *)head - offset;
3335 
3336 		next = head->next;
3337 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3338 		rcu_lock_acquire(&rcu_callback_map);
3339 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3340 
3341 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3342 			kvfree(ptr);
3343 
3344 		rcu_lock_release(&rcu_callback_map);
3345 		cond_resched_tasks_rcu_qs();
3346 	}
3347 }
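/*
 * Editor's note on the offset decoding in the emergency loop above: for
 * the double-argument kvfree_rcu(p, field) form, ->func holds
 * offsetof(typeof(*p), field) rather than a real function pointer.  For
 * a hypothetical "struct foo { int a; struct rcu_head rh; };", head
 * points at p->rh, offset is offsetof(struct foo, rh), and therefore
 * (void *)head - offset recovers the original pointer p for kvfree().
 */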
3348 
3349 /*
3350  * Schedule the kfree batch RCU work to run in workqueue context after a GP.
3351  *
3352  * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
3353  * timeout has been reached.
3354  */
3355 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
3356 {
3357 	struct kfree_rcu_cpu_work *krwp;
3358 	bool repeat = false;
3359 	int i, j;
3360 
3361 	lockdep_assert_held(&krcp->lock);
3362 
3363 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3364 		krwp = &(krcp->krw_arr[i]);
3365 
3366 		/*
3367 		 * Try to detach bkvhead or head and attach it to the
3368 		 * corresponding free channel, if one is available.  If a
3369 		 * previous RCU batch is still in progress, another one
3370 		 * cannot be queued immediately, so return false to tell
3371 		 * the caller to retry later.
3372 		 */
3373 		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3374 			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3375 				(krcp->head && !krwp->head_free)) {
3376 			// Channel 1 corresponds to SLAB ptrs.
3377 			// Channel 2 corresponds to vmalloc ptrs.
3378 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3379 				if (!krwp->bkvhead_free[j]) {
3380 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3381 					krcp->bkvhead[j] = NULL;
3382 				}
3383 			}
3384 
3385 			// Channel 3 corresponds to emergency path.
3386 			if (!krwp->head_free) {
3387 				krwp->head_free = krcp->head;
3388 				krcp->head = NULL;
3389 			}
3390 
3391 			WRITE_ONCE(krcp->count, 0);
3392 
3393 			/*
3394 			 * There is one work item per batch, and each
3395 			 * batch handles all three "free channels".  The
3396 			 * work item may already be in the pending state
3397 			 * if the channels were detached one after
3398 			 * another.
3399 			 */
3400 			queue_rcu_work(system_wq, &krwp->rcu_work);
3401 		}
3402 
3403 		// Repeat if any corresponding "free" channel is still busy.
3404 		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
3405 			repeat = true;
3406 	}
3407 
3408 	return !repeat;
3409 }
3410 
3411 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
3412 					  unsigned long flags)
3413 {
3414 	// Attempt to start a new batch.
3415 	krcp->monitor_todo = false;
3416 	if (queue_kfree_rcu_work(krcp)) {
3417 		// Success! Our job is done here.
3418 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3419 		return;
3420 	}
3421 
3422 	// Previous RCU batch still in progress, try again later.
3423 	krcp->monitor_todo = true;
3424 	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3425 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3426 }
3427 
3428 /*
3429  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3430  * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3431  */
3432 static void kfree_rcu_monitor(struct work_struct *work)
3433 {
3434 	unsigned long flags;
3435 	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3436 						 monitor_work.work);
3437 
3438 	raw_spin_lock_irqsave(&krcp->lock, flags);
3439 	if (krcp->monitor_todo)
3440 		kfree_rcu_drain_unlock(krcp, flags);
3441 	else
3442 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3443 }
3444 
3445 static enum hrtimer_restart
3446 schedule_page_work_fn(struct hrtimer *t)
3447 {
3448 	struct kfree_rcu_cpu *krcp =
3449 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3450 
3451 	queue_work(system_highpri_wq, &krcp->page_cache_work);
3452 	return HRTIMER_NORESTART;
3453 }
3454 
3455 static void fill_page_cache_func(struct work_struct *work)
3456 {
3457 	struct kvfree_rcu_bulk_data *bnode;
3458 	struct kfree_rcu_cpu *krcp =
3459 		container_of(work, struct kfree_rcu_cpu,
3460 			page_cache_work);
3461 	unsigned long flags;
3462 	bool pushed;
3463 	int i;
3464 
3465 	for (i = 0; i < rcu_min_cached_objs; i++) {
3466 		bnode = (struct kvfree_rcu_bulk_data *)
3467 			__get_free_page(GFP_KERNEL | __GFP_NOWARN);
3468 
3469 		if (bnode) {
3470 			raw_spin_lock_irqsave(&krcp->lock, flags);
3471 			pushed = put_cached_bnode(krcp, bnode);
3472 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3473 
3474 			if (!pushed) {
3475 				free_page((unsigned long) bnode);
3476 				break;
3477 			}
3478 		}
3479 	}
3480 
3481 	atomic_set(&krcp->work_in_progress, 0);
3482 }
3483 
3484 static void
3485 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3486 {
3487 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3488 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3489 		hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
3490 			HRTIMER_MODE_REL);
3491 		krcp->hrtimer.function = schedule_page_work_fn;
3492 		hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3493 	}
3494 }
3495 
3496 static inline bool
3497 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3498 {
3499 	struct kvfree_rcu_bulk_data *bnode;
3500 	int idx;
3501 
3502 	if (unlikely(!krcp->initialized))
3503 		return false;
3504 
3505 	lockdep_assert_held(&krcp->lock);
3506 	idx = !!is_vmalloc_addr(ptr);
3507 
3508 	/* Check if a new block is required. */
3509 	if (!krcp->bkvhead[idx] ||
3510 			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3511 		bnode = get_cached_bnode(krcp);
3512 		/* Switch to emergency path. */
3513 		if (!bnode)
3514 			return false;
3515 
3516 		/* Initialize the new block. */
3517 		bnode->nr_records = 0;
3518 		bnode->next = krcp->bkvhead[idx];
3519 
3520 		/* Attach it to the head. */
3521 		krcp->bkvhead[idx] = bnode;
3522 	}
3523 
3524 	/* Finally insert. */
3525 	krcp->bkvhead[idx]->records
3526 		[krcp->bkvhead[idx]->nr_records++] = ptr;
3527 
3528 	return true;
3529 }
3530 
3531 /*
3532  * Queue a request for lazy invocation of the appropriate free routine
3533  * after a grace period.  Please note that three paths are maintained: the
3534  * two main ones use the array-of-pointers interface, and the third is an
3535  * emergency path that is used only when the main paths temporarily cannot
3536  * be used due to memory pressure.
3537  *
3538  * Each kvfree_call_rcu() request is added to a batch.  The batch is drained
3539  * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3540  * in workqueue context.  Batching requests together reduces the number of
3541  * grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3542  */
3543 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3544 {
3545 	unsigned long flags;
3546 	struct kfree_rcu_cpu *krcp;
3547 	bool success;
3548 	void *ptr;
3549 
3550 	if (head) {
3551 		ptr = (void *) head - (unsigned long) func;
3552 	} else {
3553 		/*
3554 		 * Please note that the head-less variant has a
3555 		 * limitation, hence the clear rule for such objects:
3556 		 * they may be used only from contexts in which
3557 		 * might_sleep() is legal.  Everywhere else, please
3558 		 * embed an rcu_head into your data.
3559 		 */
3560 		might_sleep();
3561 		ptr = (unsigned long *) func;
3562 	}
3563 
3564 	krcp = krc_this_cpu_lock(&flags);
3565 
3566 	// Queue the object but don't yet schedule the batch.
3567 	if (debug_rcu_head_queue(ptr)) {
3568 		// Probable double kfree_rcu(), just leak.
3569 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3570 			  __func__, head);
3571 
3572 		// Mark as success and leave.
3573 		success = true;
3574 		goto unlock_return;
3575 	}
3576 
3577 	kasan_record_aux_stack(ptr);
3578 	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
3579 	if (!success) {
3580 		run_page_cache_worker(krcp);
3581 
3582 		if (head == NULL)
3583 			// Inline if kvfree_rcu(one_arg) call.
3584 			goto unlock_return;
3585 
3586 		head->func = func;
3587 		head->next = krcp->head;
3588 		krcp->head = head;
3589 		success = true;
3590 	}
3591 
3592 	WRITE_ONCE(krcp->count, krcp->count + 1);
3593 
3594 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3595 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3596 	    !krcp->monitor_todo) {
3597 		krcp->monitor_todo = true;
3598 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3599 	}
3600 
3601 unlock_return:
3602 	krc_this_cpu_unlock(krcp, flags);
3603 
3604 	/*
3605 	 * Inline kvfree() after synchronize_rcu().  We can do
3606 	 * this only from a context where might_sleep() is legal,
3607 	 * so that the current CPU can pass a quiescent state.
3608 	 */
3609 	if (!success) {
3610 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3611 		synchronize_rcu();
3612 		kvfree(ptr);
3613 	}
3614 }
3615 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
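/*
 * Editor's note: a minimal sketch of the two kvfree_rcu() forms that
 * funnel into kvfree_call_rcu() above.  "struct foo" and the buffer are
 * hypothetical caller-side code; the #if 0 guard keeps it uncompiled.
 */
#if 0
struct foo {
	int a;
	struct rcu_head rh;
};

static void example_kvfree_rcu(struct foo *fp, char *vbuf)
{
	/* Double-argument form: rcu_head embedded in the object. */
	kvfree_rcu(fp, rh);

	/*
	 * Single-argument (head-less) form: can fall back to
	 * synchronize_rcu(), so legal only where might_sleep() is.
	 */
	kvfree_rcu(vbuf);
}
#endif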
3616 
3617 static unsigned long
3618 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3619 {
3620 	int cpu;
3621 	unsigned long count = 0;
3622 
3623 	/* Snapshot count of all CPUs */
3624 	for_each_possible_cpu(cpu) {
3625 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3626 
3627 		count += READ_ONCE(krcp->count);
3628 	}
3629 
3630 	return count;
3631 }
3632 
3633 static unsigned long
3634 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3635 {
3636 	int cpu, freed = 0;
3637 	unsigned long flags;
3638 
3639 	for_each_possible_cpu(cpu) {
3640 		int count;
3641 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3642 
3643 		count = krcp->count;
3644 		raw_spin_lock_irqsave(&krcp->lock, flags);
3645 		if (krcp->monitor_todo)
3646 			kfree_rcu_drain_unlock(krcp, flags);
3647 		else
3648 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3649 
3650 		sc->nr_to_scan -= count;
3651 		freed += count;
3652 
3653 		if (sc->nr_to_scan <= 0)
3654 			break;
3655 	}
3656 
3657 	return freed == 0 ? SHRINK_STOP : freed;
3658 }
3659 
3660 static struct shrinker kfree_rcu_shrinker = {
3661 	.count_objects = kfree_rcu_shrink_count,
3662 	.scan_objects = kfree_rcu_shrink_scan,
3663 	.batch = 0,
3664 	.seeks = DEFAULT_SEEKS,
3665 };
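/*
 * Editor's note: per the shrinker contract, ->count_objects returns an
 * estimate of freeable objects (here, not-yet-batched kvfree_rcu()
 * requests summed across CPUs) and ->scan_objects returns the number it
 * managed to "free" (here, hand off to a batch), or SHRINK_STOP when no
 * progress could be made.
 */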
3666 
3667 void __init kfree_rcu_scheduler_running(void)
3668 {
3669 	int cpu;
3670 	unsigned long flags;
3671 
3672 	for_each_possible_cpu(cpu) {
3673 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3674 
3675 		raw_spin_lock_irqsave(&krcp->lock, flags);
3676 		if (!krcp->head || krcp->monitor_todo) {
3677 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3678 			continue;
3679 		}
3680 		krcp->monitor_todo = true;
3681 		schedule_delayed_work_on(cpu, &krcp->monitor_work,
3682 					 KFREE_DRAIN_JIFFIES);
3683 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3684 	}
3685 }
3686 
3687 /*
3688  * During early boot, any blocking grace-period wait automatically
3689  * implies a grace period.  Later on, this is never the case for PREEMPTION.
3690  *
3691  * However, because a context switch is a grace period for !PREEMPTION, any
3692  * blocking grace-period wait automatically implies a grace period if
3693  * there is only one CPU online at any point in time during execution of
3694  * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3695  * occasionally incorrectly indicate that there are multiple CPUs online
3696  * when there was in fact only one the whole time, as this just adds some
3697  * overhead: RCU still operates correctly.
3698  */
3699 static int rcu_blocking_is_gp(void)
3700 {
3701 	int ret;
3702 
3703 	if (IS_ENABLED(CONFIG_PREEMPTION))
3704 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3705 	might_sleep();  /* Check for RCU read-side critical section. */
3706 	preempt_disable();
3707 	/*
3708 	 * If the rcu_state.n_online_cpus counter is equal to one,
3709 	 * there is only one CPU, and that CPU sees all prior accesses
3710 	 * made by any CPU that was online at the time of its access.
3711 	 * Furthermore, if this counter is equal to one, its value cannot
3712 	 * change until after the preempt_enable() below.
3713 	 *
3714 	 * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3715 	 * all later CPUs (both this one and any that come online later
3716 	 * on) are guaranteed to see all accesses prior to this point
3717 	 * in the code, without the need for additional memory barriers.
3718 	 * Those memory barriers are provided by CPU-hotplug code.
3719 	 */
3720 	ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3721 	preempt_enable();
3722 	return ret;
3723 }
3724 
3725 /**
3726  * synchronize_rcu - wait until a grace period has elapsed.
3727  *
3728  * Control will return to the caller some time after a full grace
3729  * period has elapsed, in other words after all currently executing RCU
3730  * read-side critical sections have completed.  Note, however, that
3731  * upon return from synchronize_rcu(), the caller might well be executing
3732  * concurrently with new RCU read-side critical sections that began while
3733  * synchronize_rcu() was waiting.  RCU read-side critical sections are
3734  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3735  * In addition, regions of code across which interrupts, preemption, or
3736  * softirqs have been disabled also serve as RCU read-side critical
3737  * sections.  This includes hardware interrupt handlers, softirq handlers,
3738  * and NMI handlers.
3739  *
3740  * Note that this guarantee implies further memory-ordering guarantees.
3741  * On systems with more than one CPU, when synchronize_rcu() returns,
3742  * each CPU is guaranteed to have executed a full memory barrier since
3743  * the end of its last RCU read-side critical section whose beginning
3744  * preceded the call to synchronize_rcu().  In addition, each CPU having
3745  * an RCU read-side critical section that extends beyond the return from
3746  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3747  * after the beginning of synchronize_rcu() and before the beginning of
3748  * that RCU read-side critical section.  Note that these guarantees include
3749  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3750  * that are executing in the kernel.
3751  *
3752  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3753  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3754  * to have executed a full memory barrier during the execution of
3755  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3756  * again only if the system has more than one CPU).
3757  */
3758 void synchronize_rcu(void)
3759 {
3760 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3761 			 lock_is_held(&rcu_lock_map) ||
3762 			 lock_is_held(&rcu_sched_lock_map),
3763 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3764 	if (rcu_blocking_is_gp())
3765 		return;  // Context allows vacuous grace periods.
3766 	if (rcu_gp_is_expedited())
3767 		synchronize_rcu_expedited();
3768 	else
3769 		wait_rcu_gp(call_rcu);
3770 }
3771 EXPORT_SYMBOL_GPL(synchronize_rcu);
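/*
 * Editor's note: a minimal sketch of the updater-side pattern that
 * synchronize_rcu() supports, using a hypothetical RCU-protected list
 * element.  The caller is assumed to hold the update-side lock guarding
 * the list.  Not part of the original file; never compiled.
 */
#if 0
struct foo {
	struct list_head list;
	int data;
};

static void example_remove_and_free(struct foo *fp)
{
	/* Unlink so that no new readers can find fp... */
	list_del_rcu(&fp->list);
	/* ...wait for all pre-existing readers to finish... */
	synchronize_rcu();
	/* ...and only then free the memory. */
	kfree(fp);
}
#endif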
3772 
3773 /**
3774  * get_state_synchronize_rcu - Snapshot current RCU state
3775  *
3776  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3777  * to determine whether or not a full grace period has elapsed in the
3778  * meantime.
3779  */
3780 unsigned long get_state_synchronize_rcu(void)
3781 {
3782 	/*
3783 	 * Any prior manipulation of RCU-protected data must happen
3784 	 * before the load from ->gp_seq.
3785 	 */
3786 	smp_mb();  /* ^^^ */
3787 	return rcu_seq_snap(&rcu_state.gp_seq);
3788 }
3789 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3790 
3791 /**
3792  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3793  *
3794  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3795  *
3796  * If a full RCU grace period has elapsed since the earlier call to
3797  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
3798  * synchronize_rcu() to wait for a full grace period.
3799  *
3800  * Yes, this function does not take counter wrap into account.  But
3801  * counter wrap is harmless.  If the counter wraps, we have waited for
3802  * more than 2 billion grace periods (and way more on a 64-bit system!),
3803  * so waiting for one additional grace period should be just fine.
3804  */
3805 void cond_synchronize_rcu(unsigned long oldstate)
3806 {
3807 	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
3808 		synchronize_rcu();
3809 	else
3810 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3811 }
3812 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
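/*
 * Editor's note: a sketch pairing get_state_synchronize_rcu() with
 * cond_synchronize_rcu(), so that a grace period is waited for only if
 * one has not already elapsed during the intervening work.  The helper
 * do_other_work() is hypothetical; the sketch is never compiled.
 */
#if 0
static void example_cond_gp_wait(void)
{
	unsigned long cookie = get_state_synchronize_rcu();

	do_other_work();	/* Hypothetical time-consuming work. */

	/* No-op if a full grace period elapsed since the snapshot. */
	cond_synchronize_rcu(cookie);
}
#endif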
3813 
3814 /*
3815  * Check to see if there is any immediate RCU-related work to be done by
3816  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3817  * in order of increasing expense: checks that can be carried out against
3818  * CPU-local state are performed first.  However, we must check for CPU
3819  * stalls first, else we might not get a chance.
3820  */
3821 static int rcu_pending(int user)
3822 {
3823 	bool gp_in_progress;
3824 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3825 	struct rcu_node *rnp = rdp->mynode;
3826 
3827 	lockdep_assert_irqs_disabled();
3828 
3829 	/* Check for CPU stalls, if enabled. */
3830 	check_cpu_stall(rdp);
3831 
3832 	/* Does this CPU need a deferred NOCB wakeup? */
3833 	if (rcu_nocb_need_deferred_wakeup(rdp))
3834 		return 1;
3835 
3836 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3837 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3838 		return 0;
3839 
3840 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3841 	gp_in_progress = rcu_gp_in_progress();
3842 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3843 		return 1;
3844 
3845 	/* Does this CPU have callbacks ready to invoke? */
3846 	if (!rcu_segcblist_is_offloaded(&rdp->cblist) &&
3847 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3848 		return 1;
3849 
3850 	/* Has RCU gone idle with this CPU needing another grace period? */
3851 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3852 	    !rcu_segcblist_is_offloaded(&rdp->cblist) &&
3853 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3854 		return 1;
3855 
3856 	/* Have RCU grace period completed or started?  */
3857 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3858 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3859 		return 1;
3860 
3861 	/* nothing to do */
3862 	return 0;
3863 }
3864 
3865 /*
3866  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3867  * the compiler is expected to optimize this away.
3868  */
3869 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3870 {
3871 	trace_rcu_barrier(rcu_state.name, s, cpu,
3872 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3873 }
3874 
3875 /*
3876  * RCU callback function for rcu_barrier().  If we are last, wake
3877  * up the task executing rcu_barrier().
3878  *
3879  * Note that the value of rcu_state.barrier_sequence must be captured
3880  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3881  * other CPUs might count the value down to zero before this CPU gets
3882  * around to invoking rcu_barrier_trace(), which might result in bogus
3883  * data from the next instance of rcu_barrier().
3884  */
3885 static void rcu_barrier_callback(struct rcu_head *rhp)
3886 {
3887 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3888 
3889 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3890 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3891 		complete(&rcu_state.barrier_completion);
3892 	} else {
3893 		rcu_barrier_trace(TPS("CB"), -1, s);
3894 	}
3895 }
3896 
3897 /*
3898  * Called with preemption disabled, and from cross-cpu IRQ context.
3899  */
3900 static void rcu_barrier_func(void *cpu_in)
3901 {
3902 	uintptr_t cpu = (uintptr_t)cpu_in;
3903 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3904 
3905 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3906 	rdp->barrier_head.func = rcu_barrier_callback;
3907 	debug_rcu_head_queue(&rdp->barrier_head);
3908 	rcu_nocb_lock(rdp);
3909 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
3910 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3911 		atomic_inc(&rcu_state.barrier_cpu_count);
3912 	} else {
3913 		debug_rcu_head_unqueue(&rdp->barrier_head);
3914 		rcu_barrier_trace(TPS("IRQNQ"), -1,
3915 				  rcu_state.barrier_sequence);
3916 	}
3917 	rcu_nocb_unlock(rdp);
3918 }
3919 
3920 /**
3921  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3922  *
3923  * Note that this primitive does not necessarily wait for an RCU grace period
3924  * to complete.  For example, if there are no RCU callbacks queued anywhere
3925  * in the system, then rcu_barrier() is within its rights to return
3926  * immediately, without waiting for anything, much less an RCU grace period.
3927  */
3928 void rcu_barrier(void)
3929 {
3930 	uintptr_t cpu;
3931 	struct rcu_data *rdp;
3932 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3933 
3934 	rcu_barrier_trace(TPS("Begin"), -1, s);
3935 
3936 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3937 	mutex_lock(&rcu_state.barrier_mutex);
3938 
3939 	/* Did someone else do our work for us? */
3940 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3941 		rcu_barrier_trace(TPS("EarlyExit"), -1,
3942 				  rcu_state.barrier_sequence);
3943 		smp_mb(); /* caller's subsequent code after above check. */
3944 		mutex_unlock(&rcu_state.barrier_mutex);
3945 		return;
3946 	}
3947 
3948 	/* Mark the start of the barrier operation. */
3949 	rcu_seq_start(&rcu_state.barrier_sequence);
3950 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3951 
3952 	/*
3953 	 * Initialize the count to two rather than to zero in order
3954 	 * to avoid a too-soon return to zero in case of an immediate
3955 	 * invocation of the just-enqueued callback (or preemption of
3956 	 * this task).  Exclude CPU-hotplug operations to ensure that no
3957 	 * offline non-offloaded CPU has callbacks queued.
3958 	 */
3959 	init_completion(&rcu_state.barrier_completion);
3960 	atomic_set(&rcu_state.barrier_cpu_count, 2);
3961 	get_online_cpus();
3962 
3963 	/*
3964 	 * Force each CPU with callbacks to register a new callback.
3965 	 * When that callback is invoked, we will know that all of the
3966 	 * corresponding CPU's preceding callbacks have been invoked.
3967 	 */
3968 	for_each_possible_cpu(cpu) {
3969 		rdp = per_cpu_ptr(&rcu_data, cpu);
3970 		if (cpu_is_offline(cpu) &&
3971 		    !rcu_segcblist_is_offloaded(&rdp->cblist))
3972 			continue;
3973 		if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3974 			rcu_barrier_trace(TPS("OnlineQ"), cpu,
3975 					  rcu_state.barrier_sequence);
3976 			smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3977 		} else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3978 			   cpu_is_offline(cpu)) {
3979 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3980 					  rcu_state.barrier_sequence);
3981 			local_irq_disable();
3982 			rcu_barrier_func((void *)cpu);
3983 			local_irq_enable();
3984 		} else if (cpu_is_offline(cpu)) {
3985 			rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3986 					  rcu_state.barrier_sequence);
3987 		} else {
3988 			rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3989 					  rcu_state.barrier_sequence);
3990 		}
3991 	}
3992 	put_online_cpus();
3993 
3994 	/*
3995 	 * Now that we have an rcu_barrier_callback() callback on each
3996 	 * CPU, and thus each counted, remove the initial count.
3997 	 */
3998 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3999 		complete(&rcu_state.barrier_completion);
4000 
4001 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4002 	wait_for_completion(&rcu_state.barrier_completion);
4003 
4004 	/* Mark the end of the barrier operation. */
4005 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4006 	rcu_seq_end(&rcu_state.barrier_sequence);
4007 
4008 	/* Other rcu_barrier() invocations can now safely proceed. */
4009 	mutex_unlock(&rcu_state.barrier_mutex);
4010 }
4011 EXPORT_SYMBOL_GPL(rcu_barrier);
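/*
 * Editor's note: the classic rcu_barrier() use case is module unload,
 * where every outstanding call_rcu() callback posted by the module must
 * be invoked before the callback code itself is freed.  A hedged sketch
 * with a hypothetical unregistration helper; never compiled.
 */
#if 0
static void __exit example_module_exit(void)
{
	/* First prevent new call_rcu() invocations from this module. */
	example_unregister_everything();	/* Hypothetical. */

	/* Then wait for all already-queued callbacks to be invoked. */
	rcu_barrier();

	/* Now it is safe to free module data and unload the code. */
}
#endif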
4012 
4013 /*
4014  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4015  * first CPU in a given leaf rcu_node structure coming online.  The caller
4016  * must hold the corresponding leaf rcu_node ->lock with interrupts
4017  * disabled.
4018  */
4019 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4020 {
4021 	long mask;
4022 	long oldmask;
4023 	struct rcu_node *rnp = rnp_leaf;
4024 
4025 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4026 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4027 	for (;;) {
4028 		mask = rnp->grpmask;
4029 		rnp = rnp->parent;
4030 		if (rnp == NULL)
4031 			return;
4032 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4033 		oldmask = rnp->qsmaskinit;
4034 		rnp->qsmaskinit |= mask;
4035 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4036 		if (oldmask)
4037 			return;
4038 	}
4039 }
4040 
4041 /*
4042  * Do boot-time initialization of a CPU's per-CPU RCU data.
4043  */
4044 static void __init
4045 rcu_boot_init_percpu_data(int cpu)
4046 {
4047 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4048 
4049 	/* Set up local state, ensuring consistent view of global state. */
4050 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4051 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4052 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
4053 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
4054 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4055 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4056 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4057 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4058 	rdp->cpu = cpu;
4059 	rcu_boot_init_nocb_percpu_data(rdp);
4060 }
4061 
4062 /*
4063  * Invoked early in the CPU-online process, when pretty much all services
4064  * are available.  The incoming CPU is not present.
4065  *
4066  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4067  * offline event can be happening at a given time.  Note also that we can
4068  * accept some slop in the rcu_state.gp_seq access due to the fact that this
4069  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4070  * And any offloaded callbacks are being numbered elsewhere.
4071  */
4072 int rcutree_prepare_cpu(unsigned int cpu)
4073 {
4074 	unsigned long flags;
4075 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4076 	struct rcu_node *rnp = rcu_get_root();
4077 
4078 	/* Set up local state, ensuring consistent view of global state. */
4079 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4080 	rdp->qlen_last_fqs_check = 0;
4081 	rdp->n_force_qs_snap = rcu_state.n_force_qs;
4082 	rdp->blimit = blimit;
4083 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4084 	rcu_dynticks_eqs_online();
4085 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4086 	/*
4087 	 * Lock in case the CB/GP kthreads are still around handling
4088 	 * old callbacks (longer term we should flush all callbacks
4089 	 * before completing CPU offline)
4090 	 */
4091 	rcu_nocb_lock(rdp);
4092 	if (rcu_segcblist_empty(&rdp->cblist)) /* No early-boot CBs? */
4093 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4094 	rcu_nocb_unlock(rdp);
4095 
4096 	/*
4097 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4098 	 * propagation up the rcu_node tree will happen at the beginning
4099 	 * of the next grace period.
4100 	 */
4101 	rnp = rdp->mynode;
4102 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4103 	rdp->beenonline = true;	 /* We have now been online. */
4104 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4105 	rdp->gp_seq_needed = rdp->gp_seq;
4106 	rdp->cpu_no_qs.b.norm = true;
4107 	rdp->core_needs_qs = false;
4108 	rdp->rcu_iw_pending = false;
4109 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4110 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4111 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4112 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4113 	rcu_prepare_kthreads(cpu);
4114 	rcu_spawn_cpu_nocb_kthread(cpu);
4115 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4116 
4117 	return 0;
4118 }
4119 
4120 /*
4121  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4122  */
4123 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4124 {
4125 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4126 
4127 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4128 }
4129 
4130 /*
4131  * Near the end of the CPU-online process.  Pretty much all services
4132  * enabled, and the CPU is now very much alive.
4133  */
4134 int rcutree_online_cpu(unsigned int cpu)
4135 {
4136 	unsigned long flags;
4137 	struct rcu_data *rdp;
4138 	struct rcu_node *rnp;
4139 
4140 	rdp = per_cpu_ptr(&rcu_data, cpu);
4141 	rnp = rdp->mynode;
4142 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4143 	rnp->ffmask |= rdp->grpmask;
4144 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4145 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4146 		return 0; /* Too early in boot for scheduler work. */
4147 	sync_sched_exp_online_cleanup(cpu);
4148 	rcutree_affinity_setting(cpu, -1);
4149 
4150 	// Stop-machine done, so allow nohz_full to disable tick.
4151 	tick_dep_clear(TICK_DEP_BIT_RCU);
4152 	return 0;
4153 }
4154 
4155 /*
4156  * Near the beginning of the process.  The CPU is still very much alive
4157  * with pretty much all services enabled.
4158  */
4159 int rcutree_offline_cpu(unsigned int cpu)
4160 {
4161 	unsigned long flags;
4162 	struct rcu_data *rdp;
4163 	struct rcu_node *rnp;
4164 
4165 	rdp = per_cpu_ptr(&rcu_data, cpu);
4166 	rnp = rdp->mynode;
4167 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4168 	rnp->ffmask &= ~rdp->grpmask;
4169 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4170 
4171 	rcutree_affinity_setting(cpu, cpu);
4172 
4173 	// nohz_full CPUs need the tick for stop-machine to work quickly
4174 	tick_dep_set(TICK_DEP_BIT_RCU);
4175 	return 0;
4176 }
4177 
4178 /*
4179  * Mark the specified CPU as being online so that subsequent grace periods
4180  * (both expedited and normal) will wait on it.  Note that this means that
4181  * incoming CPUs are not allowed to use RCU read-side critical sections
4182  * until this function is called.  Failing to observe this restriction
4183  * will result in lockdep splats.
4184  *
4185  * Note that this function is special in that it is invoked directly
4186  * from the incoming CPU rather than from the cpuhp_step mechanism.
4187  * This is because this function must be invoked at a precise location.
4188  */
4189 void rcu_cpu_starting(unsigned int cpu)
4190 {
4191 	unsigned long flags;
4192 	unsigned long mask;
4193 	struct rcu_data *rdp;
4194 	struct rcu_node *rnp;
4195 	bool newcpu;
4196 
4197 	rdp = per_cpu_ptr(&rcu_data, cpu);
4198 	if (rdp->cpu_started)
4199 		return;
4200 	rdp->cpu_started = true;
4201 
4202 	rnp = rdp->mynode;
4203 	mask = rdp->grpmask;
4204 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4205 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4206 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4207 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4208 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4209 	newcpu = !(rnp->expmaskinitnext & mask);
4210 	rnp->expmaskinitnext |= mask;
4211 	/* Allow lockless access for expedited grace periods. */
4212 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4213 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4214 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4215 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4216 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4217 
4218 	/* An incoming CPU should never be blocking a grace period. */
4219 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4220 		rcu_disable_urgency_upon_qs(rdp);
4221 		/* Report QS -after- changing ->qsmaskinitnext! */
4222 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4223 	} else {
4224 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4225 	}
4226 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4227 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4228 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4229 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4230 }
4231 
4232 /*
4233  * The outgoing CPU has no further need of RCU, so remove it from
4234  * the rcu_node tree's ->qsmaskinitnext bit masks.
4235  *
4236  * Note that this function is special in that it is invoked directly
4237  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4238  * This is because this function must be invoked at a precise location.
4239  */
4240 void rcu_report_dead(unsigned int cpu)
4241 {
4242 	unsigned long flags;
4243 	unsigned long mask;
4244 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4245 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4246 
4247 	// Do any dangling deferred wakeups.
4248 	do_nocb_deferred_wakeup(rdp);
4249 
4250 	/* QS for any half-done expedited grace period. */
4251 	preempt_disable();
4252 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4253 	preempt_enable();
4254 	rcu_preempt_deferred_qs(current);
4255 
4256 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4257 	mask = rdp->grpmask;
4258 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4259 	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4260 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4261 	raw_spin_lock(&rcu_state.ofl_lock);
4262 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4263 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4264 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4265 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4266 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4267 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4268 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4269 	}
4270 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4271 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4272 	raw_spin_unlock(&rcu_state.ofl_lock);
4273 	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4274 	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4275 	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4276 
4277 	rdp->cpu_started = false;
4278 }
4279 
4280 #ifdef CONFIG_HOTPLUG_CPU
4281 /*
4282  * The outgoing CPU has just passed through the dying-idle state, and we
4283  * are being invoked from the CPU that was IPIed to continue the offline
4284  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4285  */
4286 void rcutree_migrate_callbacks(int cpu)
4287 {
4288 	unsigned long flags;
4289 	struct rcu_data *my_rdp;
4290 	struct rcu_node *my_rnp;
4291 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4292 	bool needwake;
4293 
4294 	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
4295 	    rcu_segcblist_empty(&rdp->cblist))
4296 		return;  /* No callbacks to migrate. */
4297 
4298 	local_irq_save(flags);
4299 	my_rdp = this_cpu_ptr(&rcu_data);
4300 	my_rnp = my_rdp->mynode;
4301 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4302 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4303 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4304 	/* Leverage recent GPs and set GP for new callbacks. */
4305 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4306 		   rcu_advance_cbs(my_rnp, my_rdp);
4307 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4308 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4309 	rcu_segcblist_disable(&rdp->cblist);
4310 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4311 		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4312 	if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
4313 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4314 		__call_rcu_nocb_wake(my_rdp, true, flags);
4315 	} else {
4316 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4317 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4318 	}
4319 	if (needwake)
4320 		rcu_gp_kthread_wake();
4321 	lockdep_assert_irqs_enabled();
4322 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4323 		  !rcu_segcblist_empty(&rdp->cblist),
4324 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4325 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4326 		  rcu_segcblist_first_cb(&rdp->cblist));
4327 }
4328 #endif
4329 
4330 /*
4331  * On non-huge systems, use expedited RCU grace periods to make suspend
4332  * and hibernation run faster.
4333  */
4334 static int rcu_pm_notify(struct notifier_block *self,
4335 			 unsigned long action, void *hcpu)
4336 {
4337 	switch (action) {
4338 	case PM_HIBERNATION_PREPARE:
4339 	case PM_SUSPEND_PREPARE:
4340 		rcu_expedite_gp();
4341 		break;
4342 	case PM_POST_HIBERNATION:
4343 	case PM_POST_SUSPEND:
4344 		rcu_unexpedite_gp();
4345 		break;
4346 	default:
4347 		break;
4348 	}
4349 	return NOTIFY_OK;
4350 }
4351 
4352 /*
4353  * Spawn the kthreads that handle RCU's grace periods.
4354  */
4355 static int __init rcu_spawn_gp_kthread(void)
4356 {
4357 	unsigned long flags;
4358 	int kthread_prio_in = kthread_prio;
4359 	struct rcu_node *rnp;
4360 	struct sched_param sp;
4361 	struct task_struct *t;
4362 
4363 	/* Force priority into range. */
4364 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4365 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4366 		kthread_prio = 2;
4367 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4368 		kthread_prio = 1;
4369 	else if (kthread_prio < 0)
4370 		kthread_prio = 0;
4371 	else if (kthread_prio > 99)
4372 		kthread_prio = 99;
4373 
4374 	if (kthread_prio != kthread_prio_in)
4375 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4376 			 kthread_prio, kthread_prio_in);
4377 
4378 	rcu_scheduler_fully_active = 1;
4379 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4380 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4381 		return 0;
4382 	if (kthread_prio) {
4383 		sp.sched_priority = kthread_prio;
4384 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4385 	}
4386 	rnp = rcu_get_root();
4387 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4388 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4389 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4390 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4391 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4392 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4393 	wake_up_process(t);
4394 	rcu_spawn_nocb_kthreads();
4395 	rcu_spawn_boost_kthreads();
4396 	return 0;
4397 }
4398 early_initcall(rcu_spawn_gp_kthread);
4399 
4400 /*
4401  * This function is invoked towards the end of the scheduler's
4402  * initialization process.  Before this is called, the idle task might
4403  * contain synchronous grace-period primitives (during which time, this idle
4404  * task is booting the system, and such primitives are no-ops).  After this
4405  * function is called, any synchronous grace-period primitives are run as
4406  * expedited, with the requesting task driving the grace period forward.
4407  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4408  * runtime RCU functionality.
4409  */
4410 void rcu_scheduler_starting(void)
4411 {
4412 	WARN_ON(num_online_cpus() != 1);
4413 	WARN_ON(nr_context_switches() > 0);
4414 	rcu_test_sync_prims();
4415 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4416 	rcu_test_sync_prims();
4417 }
4418 
4419 /*
4420  * Helper function for rcu_init() that initializes the rcu_state structure.
4421  */
4422 static void __init rcu_init_one(void)
4423 {
4424 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4425 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4426 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4427 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4428 
4429 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4430 	int cpustride = 1;
4431 	int i;
4432 	int j;
4433 	struct rcu_node *rnp;
4434 
4435 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4436 
4437 	/* Silence gcc 4.8 false positive about array index out of range. */
4438 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4439 		panic("rcu_init_one: rcu_num_lvls out of range");
4440 
4441 	/* Initialize the level-tracking arrays. */
4442 
4443 	for (i = 1; i < rcu_num_lvls; i++)
4444 		rcu_state.level[i] =
4445 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4446 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4447 
4448 	/* Initialize the elements themselves, starting from the leaves. */
4449 
4450 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4451 		cpustride *= levelspread[i];
4452 		rnp = rcu_state.level[i];
4453 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4454 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4455 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4456 						   &rcu_node_class[i], buf[i]);
4457 			raw_spin_lock_init(&rnp->fqslock);
4458 			lockdep_set_class_and_name(&rnp->fqslock,
4459 						   &rcu_fqs_class[i], fqs[i]);
4460 			rnp->gp_seq = rcu_state.gp_seq;
4461 			rnp->gp_seq_needed = rcu_state.gp_seq;
4462 			rnp->completedqs = rcu_state.gp_seq;
4463 			rnp->qsmask = 0;
4464 			rnp->qsmaskinit = 0;
4465 			rnp->grplo = j * cpustride;
4466 			rnp->grphi = (j + 1) * cpustride - 1;
4467 			if (rnp->grphi >= nr_cpu_ids)
4468 				rnp->grphi = nr_cpu_ids - 1;
4469 			if (i == 0) {
4470 				rnp->grpnum = 0;
4471 				rnp->grpmask = 0;
4472 				rnp->parent = NULL;
4473 			} else {
4474 				rnp->grpnum = j % levelspread[i - 1];
4475 				rnp->grpmask = BIT(rnp->grpnum);
4476 				rnp->parent = rcu_state.level[i - 1] +
4477 					      j / levelspread[i - 1];
4478 			}
4479 			rnp->level = i;
4480 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4481 			rcu_init_one_nocb(rnp);
4482 			init_waitqueue_head(&rnp->exp_wq[0]);
4483 			init_waitqueue_head(&rnp->exp_wq[1]);
4484 			init_waitqueue_head(&rnp->exp_wq[2]);
4485 			init_waitqueue_head(&rnp->exp_wq[3]);
4486 			spin_lock_init(&rnp->exp_lock);
4487 		}
4488 	}
4489 
4490 	init_swait_queue_head(&rcu_state.gp_wq);
4491 	init_swait_queue_head(&rcu_state.expedited_wq);
4492 	rnp = rcu_first_leaf_node();
4493 	for_each_possible_cpu(i) {
4494 		while (i > rnp->grphi)
4495 			rnp++;
4496 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4497 		rcu_boot_init_percpu_data(i);
4498 	}
4499 }
4500 
4501 /*
4502  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4503  * replace the definitions in tree.h because those are needed to size
4504  * the ->node array in the rcu_state structure.
4505  */
4506 static void __init rcu_init_geometry(void)
4507 {
4508 	ulong d;
4509 	int i;
4510 	int rcu_capacity[RCU_NUM_LVLS];
4511 
4512 	/*
4513 	 * Initialize any unspecified boot parameters.
4514 	 * The default values of jiffies_till_first_fqs and
4515 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4516 	 * value, which is a function of HZ, plus one for each
4517 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4518 	 */
4519 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4520 	if (jiffies_till_first_fqs == ULONG_MAX)
4521 		jiffies_till_first_fqs = d;
4522 	if (jiffies_till_next_fqs == ULONG_MAX)
4523 		jiffies_till_next_fqs = d;
4524 	adjust_jiffies_till_sched_qs();
4525 
4526 	/* If the compile-time values are accurate, just leave. */
4527 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4528 	    nr_cpu_ids == NR_CPUS)
4529 		return;
4530 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4531 		rcu_fanout_leaf, nr_cpu_ids);
4532 
4533 	/*
4534 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4535 	 * and cannot exceed the number of bits in the rcu_node masks.
4536 	 * Complain and fall back to the compile-time values if this
4537 	 * limit is exceeded.
4538 	 */
4539 	if (rcu_fanout_leaf < 2 ||
4540 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4541 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4542 		WARN_ON(1);
4543 		return;
4544 	}
4545 
4546 	/*
4547 	 * Compute number of nodes that can be handled by an rcu_node tree
4548 	 * with the given number of levels.
4549 	 */
4550 	rcu_capacity[0] = rcu_fanout_leaf;
4551 	for (i = 1; i < RCU_NUM_LVLS; i++)
4552 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4553 
4554 	/*
4555 	 * The tree must be able to accommodate the configured number of CPUs.
4556 	 * If this limit is exceeded, fall back to the compile-time values.
4557 	 */
4558 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4559 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4560 		WARN_ON(1);
4561 		return;
4562 	}
4563 
4564 	/* Calculate the number of levels in the tree. */
4565 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4566 	}
4567 	rcu_num_lvls = i + 1;
4568 
4569 	/* Calculate the number of rcu_nodes at each level of the tree. */
4570 	for (i = 0; i < rcu_num_lvls; i++) {
4571 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4572 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4573 	}
4574 
4575 	/* Calculate the total number of rcu_node structures. */
4576 	rcu_num_nodes = 0;
4577 	for (i = 0; i < rcu_num_lvls; i++)
4578 		rcu_num_nodes += num_rcu_lvl[i];
4579 }
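/*
 * Editor's note: a worked example of the computation above, assuming
 * RCU_FANOUT=64, rcu_fanout_leaf=16, and nr_cpu_ids=100.  Then
 * rcu_capacity[] = { 16, 1024, ... }, so two levels suffice
 * (rcu_num_lvls = 2); num_rcu_lvl[0] = DIV_ROUND_UP(100, 1024) = 1 root
 * node and num_rcu_lvl[1] = DIV_ROUND_UP(100, 16) = 7 leaf nodes, for a
 * total of rcu_num_nodes = 8.
 */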
4580 
4581 /*
4582  * Dump out the structure of the rcu_node combining tree associated
4583  * with the rcu_state structure.
4584  */
4585 static void __init rcu_dump_rcu_node_tree(void)
4586 {
4587 	int level = 0;
4588 	struct rcu_node *rnp;
4589 
4590 	pr_info("rcu_node tree layout dump\n");
4591 	pr_info(" ");
4592 	rcu_for_each_node_breadth_first(rnp) {
4593 		if (rnp->level != level) {
4594 			pr_cont("\n");
4595 			pr_info(" ");
4596 			level = rnp->level;
4597 		}
4598 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4599 	}
4600 	pr_cont("\n");
4601 }
4602 
4603 struct workqueue_struct *rcu_gp_wq;
4604 struct workqueue_struct *rcu_par_gp_wq;
4605 
4606 static void __init kfree_rcu_batch_init(void)
4607 {
4608 	int cpu;
4609 	int i;
4610 
4611 	for_each_possible_cpu(cpu) {
4612 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4613 
4614 		for (i = 0; i < KFREE_N_BATCHES; i++) {
4615 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4616 			krcp->krw_arr[i].krcp = krcp;
4617 		}
4618 
4619 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4620 		INIT_WORK(&krcp->page_cache_work, fill_page_cache_func);
4621 		krcp->initialized = true;
4622 	}
4623 	if (register_shrinker(&kfree_rcu_shrinker))
4624 		pr_err("Failed to register kfree_rcu() shrinker!\n");
4625 }
4626 
4627 void __init rcu_init(void)
4628 {
4629 	int cpu;
4630 
4631 	rcu_early_boot_tests();
4632 
4633 	kfree_rcu_batch_init();
4634 	rcu_bootup_announce();
4635 	rcu_init_geometry();
4636 	rcu_init_one();
4637 	if (dump_tree)
4638 		rcu_dump_rcu_node_tree();
4639 	if (use_softirq)
4640 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4641 
4642 	/*
4643 	 * We don't need protection against CPU-hotplug here because
4644 	 * this is called early in boot, before either interrupts
4645 	 * or the scheduler are operational.
4646 	 */
4647 	pm_notifier(rcu_pm_notify, 0);
4648 	for_each_online_cpu(cpu) {
4649 		rcutree_prepare_cpu(cpu);
4650 		rcu_cpu_starting(cpu);
4651 		rcutree_online_cpu(cpu);
4652 	}
4653 
4654 	/* Create workqueue for expedited GPs and for Tree SRCU. */
4655 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4656 	WARN_ON(!rcu_gp_wq);
4657 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4658 	WARN_ON(!rcu_par_gp_wq);
4659 	srcu_init();
4660 
4661 	/* Fill in default value for rcutree.qovld boot parameter. */
4662 	/* -After- the rcu_node ->lock fields are initialized! */
4663 	if (qovld < 0)
4664 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4665 	else
4666 		qovld_calc = qovld;
4667 }
4668 
4669 #include "tree_stall.h"
4670 #include "tree_exp.h"
4671 #include "tree_plugin.h"
4672