// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <linux/context_tracking.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.gpwrap = true,
#ifdef CONFIG_RCU_NOCB_CPU
	.cblist.flags = SEGCBLIST_RCU_CORE,
#endif
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);

/*
 * Real-time priority of the rcuc/rcub/rcuop kthreads.  Whether the "rcuop"
 * kthreads actually run at real-time priority is additionally controlled
 * by the CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
 */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This RCU parameter is read-only at runtime.  It sets the minimum
 * number of objects that may be cached per CPU, each object being one
 * page in size.  The value can be changed only at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

// A page shrinker can ask for pages to be freed to make them
// available for other parts of the system. This usually happens
// under low memory conditions, and in that case we should also
// defer page-cache filling for a short time period.
//
// The default value is 5 seconds, which is long enough to reduce
// interference with the shrinker while it asks other systems to
// drain their caches.
static int rcu_delay_page_cache_fill_msec = 5000;
module_param(rcu_delay_page_cache_fill_msec, int, 0444);

/* Retrieve the RCU kthreads' priority for rcutorture. */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Is the CPU corresponding to the specified rcu_data structure online
 * from RCU's perspective?  This perspective is given by that structure's
 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
 */
static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
{
	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
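
/*
 * Worked example of the ->gp_seq encoding (a sketch based on the
 * RCU_SEQ_CTR_SHIFT and RCU_SEQ_STATE_MASK definitions in rcu.h, where
 * RCU_SEQ_CTR_SHIFT of 2 places the state in the low two bits and the
 * grace-period counter in the remaining bits):
 *
 *	gp_seq == 0x104:  counter 0x41, state 0  ->  no GP in progress
 *	gp_seq == 0x105:  counter 0x41, state 1  ->  GP in progress
 *
 * So rcu_gp_in_progress() reduces to "are the low state bits nonzero?".
 */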

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	if (ct_dynticks() & RCU_DYNTICKS_IDX)
		return;
	ct_state_inc(RCU_DYNTICKS_IDX);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(int cpu)
{
	smp_mb();  // Fundamental RCU ordering guarantee.
	return ct_dynticks_cpu_acquire(cpu);
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICKS_IDX);
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp->cpu);
}
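
/*
 * Typical use of the helpers above is a snapshot/recheck pattern (an
 * illustrative sketch; the real users are dyntick_save_progress_counter()
 * and rcu_implicit_dynticks_qs() later in this file):
 *
 *	int snap = rcu_dynticks_snap(cpu);
 *
 *	if (rcu_dynticks_in_eqs(snap))
 *		return 1;	// CPU is idle right now: quiescent.
 *	...			// Wait a while, then recheck:
 *	if (rcu_dynticks_in_eqs_since(rdp, snap))
 *		return 1;	// CPU passed through idle meanwhile.
 */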

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure.
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == ct_dynticks_cpu(cpu);
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
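
/*
 * Worked example with illustrative values: suppose RCU_DYNTICKS_IDX is
 * 0x10 and this CPU's dynticks counter reads 0x30, so the 0x10 bit is set
 * and RCU is watching.  Adding 2 * RCU_DYNTICKS_IDX yields 0x50: this CPU
 * is still watching, but any snapshot taken earlier (0x30) no longer
 * matches, so rcu_dynticks_in_eqs_since() reports that the CPU passed
 * through a quiescent state -- the promised zero-duration dyntick-idle
 * period.
 */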

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or is running directly from the idle loop itself, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods.  The latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = ct_dynticks_nmi_nesting();
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return ct_dynticks_nesting() == 0;
}

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
				// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
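
/*
 * Worked example with illustrative numbers: with jiffies_till_first_fqs
 * == 1, jiffies_till_next_fqs == 1, HZ == 250, nr_cpu_ids == 256, and
 * assuming the RCU_JIFFIES_FQS_DIV of 256 from rcu.h, the third-fqs-scan
 * estimate is j = 1 + 2 * 1 = 3, but the lower bound is
 * HZ / 10 + 256 / 256 = 26, so jiffies_to_sched_qs is set to 26 jiffies
 * (roughly 104 milliseconds at that HZ).
 */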

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
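
/*
 * Because of the "rcutree." MODULE_PARAM_PREFIX above, these knobs appear
 * on the kernel command line as, for example, rcutree.jiffies_till_first_fqs=4
 * and, given the 0644 permissions, can also be adjusted at runtime via
 * /sys/module/rcutree/parameters/jiffies_till_first_fqs (a usage sketch;
 * the value 4 is chosen arbitrarily for illustration).
 */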

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
/*
 * An empty function that will trigger a reschedule on
 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wake-ups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}
#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_NO_HZ_FULL
/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so.  This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API.  This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(void)
{
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
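
/*
 * Illustrative use of rcu_is_watching() (a sketch, not a caller in this
 * file): code that might run from the idle loop or another extended
 * quiescent state must check before entering a read-side critical section:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		... access RCU-protected data ...
 *		rcu_read_unlock();
 *	}
 */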

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	/*
	 * Strictly, we care here about the case where the current CPU is
	 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
	 * not being up to date. So arch_spin_is_locked() might have a
	 * false positive if it's held by some *other* CPU, but that's
	 * OK because that just means a false *negative* on the warning.
	 */
	if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
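
/*
 * Worked example: ULONG_CMP_LT() does wrap-safe modular comparison, so
 * the first check above fires once rnp->gp_seq has advanced more than
 * ULONG_MAX / 4 beyond this CPU's rcu_data counter.  For instance, if
 * rdp->gp_seq last recorded 100 and the CPU then slept through more than
 * a quarter of the counter space, ->gpwrap is set so that later code
 * distrusts (and resets) this CPU's stale grace-period bookkeeping.
 */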

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state.  After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline.  This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(&rdp->rcu_urgent_qs, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}

/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP kthread needs to be awakened, else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}
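
/*
 * Funnel-locking walk-through (a sketch for a two-level tree): the CPU's
 * leaf rcu_node already has ->lock held, so the loop above records
 * gp_seq_req in the leaf, acquires the root's ->lock, and either finds
 * the request already recorded or started (bailing early, the common
 * case under contention) or records it at the root and asks that the
 * grace-period kthread be awakened.  At most one lock beyond the leaf's
 * is held at any time, keeping contention on the root's lock low.
 */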

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}
/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying whether we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));

	return ret;
}

/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rnp, rdp);
}

/*
 * Move and classify callbacks, but only if doing so won't require
 * that the RCU grace-period kthread be awakened.
 */
static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
						  struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
		return;
	// The grace period cannot end while we hold the rcu_node lock.
	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
	raw_spin_unlock_rcu_node(rnp);
}

/*
 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
 * quiescent state.  This is intended to be invoked when the CPU notices
 * a new grace period.
 */
static void rcu_strict_gp_check_qs(void)
{
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
	bool ret = false;
	bool need_qs;
	const bool offloaded = rcu_rdp_is_offloaded(rdp);

	raw_lockdep_assert_held_rcu_node(rnp);

	if (rdp->gp_seq == rnp->gp_seq)
		return false; /* Nothing to do. */

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		if (!offloaded)
			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
		rdp->core_needs_qs = false;
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		if (!offloaded)
			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
		if (rdp->core_needs_qs)
			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
	}

	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
		need_qs = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_qs;
		rdp->core_needs_qs = need_qs;
		zero_cpu_stall_ticks(rdp);
	}
	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
		WRITE_ONCE(rdp->last_sched_clock, jiffies);
	WRITE_ONCE(rdp->gpwrap, false);
	rcu_gpnum_ovf(rnp, rdp);
	return ret;
}

static void note_gp_changes(struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	needwake = __note_gp_changes(rnp, rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_strict_gp_check_qs();
	if (needwake)
		rcu_gp_kthread_wake();
}

static atomic_t *rcu_gp_slow_suppress;

/* Register a counter to suppress debugging grace-period delays. */
void rcu_gp_slow_register(atomic_t *rgssp)
{
	WARN_ON_ONCE(rcu_gp_slow_suppress);

	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_register);

/* Unregister a counter, with NULL for not caring which. */
void rcu_gp_slow_unregister(atomic_t *rgssp)
{
	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);

	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);

static bool rcu_gp_slow_is_suppressed(void)
{
	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);

	return rgssp && atomic_read(rgssp);
}

static void rcu_gp_slow(int delay)
{
	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_idle(delay);
}
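
/*
 * Worked example of the normalization (illustrative numbers): with
 * PER_RCU_NODE_PERIOD == 3, rcu_num_nodes == 17, and delay == 5 jiffies,
 * a sleep happens once every 3 * 17 * 5 = 255 grace periods.  The average
 * added latency per grace period is therefore 5 / 255 = 1 / (3 * 17)
 * jiffy, independent of the delay's duration, exactly as promised by the
 * PER_RCU_NODE_PERIOD comment earlier in this file.
 */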

static unsigned long sleep_duration;

/* Allow rcutorture to stall the grace-period kthread. */
void rcu_gp_set_torture_wait(int duration)
{
	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
		WRITE_ONCE(sleep_duration, duration);
}
EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);

/* Actually implement the aforementioned wait. */
static void rcu_gp_torture_wait(void)
{
	unsigned long duration;

	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
		return;
	duration = xchg(&sleep_duration, 0UL);
	if (duration > 0) {
		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
		schedule_timeout_idle(duration);
		pr_alert("%s: Wait complete\n", __func__);
	}
}

/*
 * Handler for on_each_cpu() to invoke the target CPU's RCU core
 * processing.
 */
static void rcu_strict_gp_boundary(void *unused)
{
	invoke_rcu_core();
}

// Has rcu_init() been invoked?  This is used (for example) to determine
// whether spinlocks may be acquired safely.
static bool rcu_init_invoked(void)
{
	return !!rcu_state.n_online_cpus;
}

// Make the polled API aware of the beginning of a grace period.
static void rcu_poll_gp_seq_start(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked())
		raw_lockdep_assert_held_rcu_node(rnp);

	// If RCU was idle, note beginning of GP.
	if (!rcu_seq_state(rcu_state.gp_seq_polled))
		rcu_seq_start(&rcu_state.gp_seq_polled);

	// Either way, record current state.
	*snap = rcu_state.gp_seq_polled;
}

// Make the polled API aware of the end of a grace period.
static void rcu_poll_gp_seq_end(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked())
		raw_lockdep_assert_held_rcu_node(rnp);

	// If the previously noted GP is still in effect, record the
	// end of that GP.  Either way, zero counter to avoid counter-wrap
	// problems.
	if (*snap && *snap == rcu_state.gp_seq_polled) {
		rcu_seq_end(&rcu_state.gp_seq_polled);
		rcu_state.gp_seq_polled_snap = 0;
		rcu_state.gp_seq_polled_exp_snap = 0;
	} else {
		*snap = 0;
	}
}

// Make the polled API aware of the beginning of a grace period, but
// where caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_start(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Make the polled API aware of the end of a grace period, but where
// caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_end(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
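
// Caller-side pattern for the polled grace-period API that these helpers
// back (an illustrative sketch using the public get_state_synchronize_rcu()
// and poll_state_synchronize_rcu() interfaces):
//
//	unsigned long cookie = get_state_synchronize_rcu();
//
//	... do something else for a while ...
//	if (poll_state_synchronize_rcu(cookie))
//		... a full grace period has elapsed since the snapshot ...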

/*
 * Initialize a new grace period.  Return false if no grace period required.
 */
static noinline_for_stack bool rcu_gp_init(void)
{
	unsigned long flags;
	unsigned long oldmask;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root();

	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	raw_spin_lock_irq_rcu_node(rnp);
	if (!READ_ONCE(rcu_state.gp_flags)) {
		/* Spurious wakeup, tell caller to go back to sleep.  */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}
	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */

	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
		/*
		 * Grace period already in progress, don't start another.
		 * Not supposed to be able to happen.
		 */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}

	/* Advance to a new grace period and initialize state. */
	record_gp_stall_check_time();
	/* Record GP times before starting GP, hence rcu_seq_start(). */
	rcu_seq_start(&rcu_state.gp_seq);
	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
	raw_spin_unlock_irq_rcu_node(rnp);

	/*
	 * Apply per-leaf buffered online and offline operations to
	 * the rcu_node tree. Note that this new grace period need not
	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
	 * offlining path, when combined with checks in this function,
	 * will handle CPUs that are currently going offline or that will
	 * go offline later.  Please also refer to "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
	/* Exclude CPU hotplug operations. */
	rcu_for_each_leaf_node(rnp) {
		local_irq_save(flags);
		arch_spin_lock(&rcu_state.ofl_lock);
		raw_spin_lock_rcu_node(rnp);
		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
		    !rnp->wait_blkd_tasks) {
			/* Nothing to do on this leaf rcu_node structure. */
			raw_spin_unlock_rcu_node(rnp);
			arch_spin_unlock(&rcu_state.ofl_lock);
			local_irq_restore(flags);
			continue;
		}

		/* Record old state, apply changes to ->qsmaskinit field. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit = rnp->qsmaskinitnext;

		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
		if (!oldmask != !rnp->qsmaskinit) {
			if (!oldmask) { /* First online CPU for rcu_node. */
				if (!rnp->wait_blkd_tasks) /* Ever offline? */
					rcu_init_new_rnp(rnp);
			} else if (rcu_preempt_has_tasks(rnp)) {
				rnp->wait_blkd_tasks = true; /* blocked tasks */
			} else { /* Last offline CPU and can propagate. */
				rcu_cleanup_dead_rnp(rnp);
			}
		}

		/*
		 * If all waited-on tasks from prior grace period are
		 * done, and if all this rcu_node structure's CPUs are
		 * still offline, propagate up the rcu_node tree and
		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
		 * rcu_node structure's CPUs has since come back online,
		 * simply clear ->wait_blkd_tasks.
		 */
		if (rnp->wait_blkd_tasks &&
		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
			rnp->wait_blkd_tasks = false;
			if (!rnp->qsmaskinit)
				rcu_cleanup_dead_rnp(rnp);
		}

		raw_spin_unlock_rcu_node(rnp);
		arch_spin_unlock(&rcu_state.ofl_lock);
		local_irq_restore(flags);
	}
	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure, relying on the
	 * layout of the tree within the rcu_state.node[] array.  Note that
	 * other CPUs will access only the leaves of the hierarchy, thus
	 * seeing that no grace period is in progress, at least until the
	 * corresponding leaf node has been initialized.
	 *
	 * The grace period cannot complete until the initialization
	 * process finishes, because this kthread handles both.
	 */
	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
	rcu_for_each_node_breadth_first(rnp) {
		rcu_gp_slow(gp_init_delay);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rdp = this_cpu_ptr(&rcu_data);
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
		if (rnp == rdp->mynode)
			(void)__note_gp_changes(rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		/* Quiescent states for tasks on any now-offline CPUs. */
		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
		rnp->rcu_gp_init_mask = mask;
1556 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1557 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1558 		else
1559 			raw_spin_unlock_irq_rcu_node(rnp);
1560 		cond_resched_tasks_rcu_qs();
1561 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1562 	}
1563 
1564 	// If strict, make all CPUs aware of new grace period.
1565 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1566 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1567 
1568 	return true;
1569 }
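
/*
 * For reference, a worked example of the ->gp_seq encoding manipulated
 * above (an illustration based on rcu_seq_start()/rcu_seq_end() in
 * kernel/rcu/rcu.h, assuming RCU_SEQ_CTR_SHIFT == 2): the low two bits
 * hold the phase and the upper bits count grace periods, so:
 *
 *	gp_seq == 0x100:  idle, 0x40 grace periods completed.
 *	rcu_seq_start():  gp_seq becomes 0x101, a grace period is in progress.
 *	rcu_seq_end():    gp_seq becomes 0x104, idle again, count incremented.
 */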
1570 
1571 /*
1572  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1573  * time.
1574  */
1575 static bool rcu_gp_fqs_check_wake(int *gfp)
1576 {
1577 	struct rcu_node *rnp = rcu_get_root();
1578 
1579 	// If under overload conditions, force an immediate FQS scan.
1580 	if (*gfp & RCU_GP_FLAG_OVLD)
1581 		return true;
1582 
1583 	// Someone like call_rcu() requested a force-quiescent-state scan.
1584 	*gfp = READ_ONCE(rcu_state.gp_flags);
1585 	if (*gfp & RCU_GP_FLAG_FQS)
1586 		return true;
1587 
1588 	// The current grace period has completed.
1589 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1590 		return true;
1591 
1592 	return false;
1593 }
1594 
1595 /*
1596  * Do one round of quiescent-state forcing.
1597  */
1598 static void rcu_gp_fqs(bool first_time)
1599 {
1600 	struct rcu_node *rnp = rcu_get_root();
1601 
1602 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1603 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1604 	if (first_time) {
1605 		/* Collect dyntick-idle snapshots. */
1606 		force_qs_rnp(dyntick_save_progress_counter);
1607 	} else {
1608 		/* Handle dyntick-idle and offline CPUs. */
1609 		force_qs_rnp(rcu_implicit_dynticks_qs);
1610 	}
1611 	/* Clear flag to prevent immediate re-entry. */
1612 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1613 		raw_spin_lock_irq_rcu_node(rnp);
1614 		WRITE_ONCE(rcu_state.gp_flags,
1615 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1616 		raw_spin_unlock_irq_rcu_node(rnp);
1617 	}
1618 }
1619 
1620 /*
1621  * Loop doing repeated quiescent-state forcing until the grace period ends.
1622  */
1623 static noinline_for_stack void rcu_gp_fqs_loop(void)
1624 {
1625 	bool first_gp_fqs = true;
1626 	int gf = 0;
1627 	unsigned long j;
1628 	int ret;
1629 	struct rcu_node *rnp = rcu_get_root();
1630 
1631 	j = READ_ONCE(jiffies_till_first_fqs);
1632 	if (rcu_state.cbovld)
1633 		gf = RCU_GP_FLAG_OVLD;
1634 	ret = 0;
1635 	for (;;) {
1636 		if (rcu_state.cbovld) {
1637 			j = (j + 2) / 3;
1638 			if (j <= 0)
1639 				j = 1;
1640 		}
1641 		if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1642 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1643 			/*
1644 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1645 			 * update; required for stall checks.
1646 			 */
1647 			smp_wmb();
1648 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1649 				   jiffies + (j ? 3 * j : 2));
1650 		}
1651 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1652 				       TPS("fqswait"));
1653 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1654 		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1655 				 rcu_gp_fqs_check_wake(&gf), j);
1656 		rcu_gp_torture_wait();
1657 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1658 		/* Locking provides needed memory barriers. */
1659 		/*
1660 		 * Exit the loop if the root rcu_node structure indicates that the grace
1661 		 * period has ended.  The rcu_preempt_blocked_readers_cgp(rnp) check
1662 		 * is required only for single-node rcu_node trees because readers blocking
1663 		 * the current grace period are queued only on leaf rcu_node structures.
1664 		 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1665 		 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1666 		 * the corresponding leaf nodes have passed through their quiescent state.
1667 		 */
1668 		if (!READ_ONCE(rnp->qsmask) &&
1669 		    !rcu_preempt_blocked_readers_cgp(rnp))
1670 			break;
1671 		/* If time for quiescent-state forcing, do it. */
1672 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1673 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1674 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1675 					       TPS("fqsstart"));
1676 			rcu_gp_fqs(first_gp_fqs);
1677 			gf = 0;
1678 			if (first_gp_fqs) {
1679 				first_gp_fqs = false;
1680 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1681 			}
1682 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1683 					       TPS("fqsend"));
1684 			cond_resched_tasks_rcu_qs();
1685 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1686 			ret = 0; /* Force full wait till next FQS. */
1687 			j = READ_ONCE(jiffies_till_next_fqs);
1688 		} else {
1689 			/* Deal with stray signal. */
1690 			cond_resched_tasks_rcu_qs();
1691 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1692 			WARN_ON(signal_pending(current));
1693 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1694 					       TPS("fqswaitsig"));
1695 			ret = 1; /* Keep old FQS timing. */
1696 			j = jiffies;
1697 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1698 				j = 1;
1699 			else
1700 				j = rcu_state.jiffies_force_qs - j;
1701 			gf = 0;
1702 		}
1703 	}
1704 }
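
/*
 * A quick worked example of the callback-overload backoff above: with
 * rcu_state.cbovld set and an initial wait of j == 10 jiffies, each pass
 * through the loop shrinks the force-quiescent-state wait as
 * j = (j + 2) / 3, giving 10 -> 4 -> 2 -> 1, after which j stays pinned
 * at the one-jiffy floor until the overload condition clears.
 */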
1705 
1706 /*
1707  * Clean up after the old grace period.
1708  */
1709 static noinline void rcu_gp_cleanup(void)
1710 {
1711 	int cpu;
1712 	bool needgp = false;
1713 	unsigned long gp_duration;
1714 	unsigned long new_gp_seq;
1715 	bool offloaded;
1716 	struct rcu_data *rdp;
1717 	struct rcu_node *rnp = rcu_get_root();
1718 	struct swait_queue_head *sq;
1719 
1720 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1721 	raw_spin_lock_irq_rcu_node(rnp);
1722 	rcu_state.gp_end = jiffies;
1723 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1724 	if (gp_duration > rcu_state.gp_max)
1725 		rcu_state.gp_max = gp_duration;
1726 
1727 	/*
1728 	 * We know the grace period is complete, but to everyone else
1729 	 * it appears to still be ongoing.  But it is also the case
1730 	 * that to everyone else it looks like there is nothing that
1731 	 * they can do to advance the grace period.  It is therefore
1732 	 * safe for us to drop the lock in order to mark the grace
1733 	 * period as completed in all of the rcu_node structures.
1734 	 */
1735 	rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1736 	raw_spin_unlock_irq_rcu_node(rnp);
1737 
1738 	/*
1739 	 * Propagate new ->gp_seq value to rcu_node structures so that
1740 	 * other CPUs don't have to wait until the start of the next grace
1741 	 * period to process their callbacks.  This also avoids some nasty
1742 	 * RCU grace-period initialization races by forcing the end of
1743 	 * the current grace period to be completely recorded in all of
1744 	 * the rcu_node structures before the beginning of the next grace
1745 	 * period is recorded in any of the rcu_node structures.
1746 	 */
1747 	new_gp_seq = rcu_state.gp_seq;
1748 	rcu_seq_end(&new_gp_seq);
1749 	rcu_for_each_node_breadth_first(rnp) {
1750 		raw_spin_lock_irq_rcu_node(rnp);
1751 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1752 			dump_blkd_tasks(rnp, 10);
1753 		WARN_ON_ONCE(rnp->qsmask);
1754 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1755 		if (!rnp->parent)
1756 			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1757 		rdp = this_cpu_ptr(&rcu_data);
1758 		if (rnp == rdp->mynode)
1759 			needgp = __note_gp_changes(rnp, rdp) || needgp;
1760 		/* smp_mb() provided by prior unlock-lock pair. */
1761 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
1762 		// Reset overload indication for CPUs no longer overloaded
1763 		if (rcu_is_leaf_node(rnp))
1764 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1765 				rdp = per_cpu_ptr(&rcu_data, cpu);
1766 				check_cb_ovld_locked(rdp, rnp);
1767 			}
1768 		sq = rcu_nocb_gp_get(rnp);
1769 		raw_spin_unlock_irq_rcu_node(rnp);
1770 		rcu_nocb_gp_cleanup(sq);
1771 		cond_resched_tasks_rcu_qs();
1772 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1773 		rcu_gp_slow(gp_cleanup_delay);
1774 	}
1775 	rnp = rcu_get_root();
1776 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1777 
1778 	/* Declare grace period done, trace first to use old GP number. */
1779 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1780 	rcu_seq_end(&rcu_state.gp_seq);
1781 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1782 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1783 	/* Check for GP requests since above loop. */
1784 	rdp = this_cpu_ptr(&rcu_data);
1785 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1786 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1787 				  TPS("CleanupMore"));
1788 		needgp = true;
1789 	}
1790 	/* Advance CBs to reduce false positives below. */
1791 	offloaded = rcu_rdp_is_offloaded(rdp);
1792 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1793 
1794 		// We get here if a grace period was needed ("needgp")
1795 		// and the above call to rcu_accelerate_cbs() did not set
1796 		// the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
1797 		// the need for another grace period).  The purpose
1798 		// of the "offloaded" check is to avoid invoking
1799 		// rcu_accelerate_cbs() on an offloaded CPU because we do not
1800 		// hold the ->nocb_lock needed to safely access an offloaded
1801 		// ->cblist.  We do not want to acquire that lock because
1802 		// it can be heavily contended during callback floods.
1803 
1804 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1805 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1806 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1807 	} else {
1808 
1809 		// We get here either if there is no need for an
1810 		// additional grace period or if rcu_accelerate_cbs() has
1811 		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
1812 		// So all we need to do is to clear all of the other
1813 		// ->gp_flags bits.
1814 
1815 		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1816 	}
1817 	raw_spin_unlock_irq_rcu_node(rnp);
1818 
1819 	// If strict, make all CPUs aware of the end of the old grace period.
1820 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1821 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1822 }
1823 
1824 /*
1825  * Body of kthread that handles grace periods.
1826  */
1827 static int __noreturn rcu_gp_kthread(void *unused)
1828 {
1829 	rcu_bind_gp_kthread();
1830 	for (;;) {
1831 
1832 		/* Handle grace-period start. */
1833 		for (;;) {
1834 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1835 					       TPS("reqwait"));
1836 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1837 			swait_event_idle_exclusive(rcu_state.gp_wq,
1838 					 READ_ONCE(rcu_state.gp_flags) &
1839 					 RCU_GP_FLAG_INIT);
1840 			rcu_gp_torture_wait();
1841 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1842 			/* Locking provides needed memory barrier. */
1843 			if (rcu_gp_init())
1844 				break;
1845 			cond_resched_tasks_rcu_qs();
1846 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1847 			WARN_ON(signal_pending(current));
1848 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1849 					       TPS("reqwaitsig"));
1850 		}
1851 
1852 		/* Handle quiescent-state forcing. */
1853 		rcu_gp_fqs_loop();
1854 
1855 		/* Handle grace-period end. */
1856 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1857 		rcu_gp_cleanup();
1858 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1859 	}
1860 }
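
/*
 * For orientation, the sequence of ->gp_state values that one pass
 * through the loop above moves through, as set in this file:
 *
 *	RCU_GP_WAIT_GPS  -> RCU_GP_DONE_GPS	// Grace-period start.
 *	RCU_GP_ONOFF     -> RCU_GP_INIT		// rcu_gp_init().
 *	RCU_GP_WAIT_FQS  -> RCU_GP_DOING_FQS	// rcu_gp_fqs_loop(), repeated.
 *	RCU_GP_CLEANUP   -> RCU_GP_CLEANED	// rcu_gp_cleanup().
 *
 * with RCU_GP_IDLE written within rcu_gp_cleanup() before the kthread
 * returns to RCU_GP_WAIT_GPS.
 */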
1861 
1862 /*
1863  * Report a full set of quiescent states to the rcu_state data structure.
1864  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1865  * another grace period is required.  Whether we wake the grace-period
1866  * kthread or it awakens itself for the next round of quiescent-state
1867  * forcing, that kthread will clean up after the just-completed grace
1868  * period.  Note that the caller must hold rnp->lock, which is released
1869  * before return.
1870  */
1871 static void rcu_report_qs_rsp(unsigned long flags)
1872 	__releases(rcu_get_root()->lock)
1873 {
1874 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
1875 	WARN_ON_ONCE(!rcu_gp_in_progress());
1876 	WRITE_ONCE(rcu_state.gp_flags,
1877 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1878 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1879 	rcu_gp_kthread_wake();
1880 }
1881 
1882 /*
1883  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1884  * Allows quiescent states for a group of CPUs to be reported in one go
1885  * to the specified rcu_node structure, though all the CPUs in the group
1886  * must be represented by the same rcu_node structure (which need not be a
1887  * leaf rcu_node structure, though it often will be).  The gps parameter
1888  * is the grace-period snapshot, which means that the quiescent states
1889  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
1890  * must be held upon entry, and it is released before return.
1891  *
1892  * As a special case, if mask is zero, the bit-already-cleared check is
1893  * disabled.  This allows propagating quiescent state due to resumed tasks
1894  * during grace-period initialization.
1895  */
1896 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1897 			      unsigned long gps, unsigned long flags)
1898 	__releases(rnp->lock)
1899 {
1900 	unsigned long oldmask = 0;
1901 	struct rcu_node *rnp_c;
1902 
1903 	raw_lockdep_assert_held_rcu_node(rnp);
1904 
1905 	/* Walk up the rcu_node hierarchy. */
1906 	for (;;) {
1907 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1908 
1909 			/*
1910 			 * Our bit has already been cleared, or the
1911 			 * relevant grace period is already over, so done.
1912 			 */
1913 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1914 			return;
1915 		}
1916 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1917 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1918 			     rcu_preempt_blocked_readers_cgp(rnp));
1919 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1920 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1921 						 mask, rnp->qsmask, rnp->level,
1922 						 rnp->grplo, rnp->grphi,
1923 						 !!rnp->gp_tasks);
1924 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1925 
1926 			/* Other bits still set at this level, so done. */
1927 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1928 			return;
1929 		}
1930 		rnp->completedqs = rnp->gp_seq;
1931 		mask = rnp->grpmask;
1932 		if (rnp->parent == NULL) {
1933 
1934 			/* No more levels.  Exit loop holding root lock. */
1935 
1936 			break;
1937 		}
1938 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1939 		rnp_c = rnp;
1940 		rnp = rnp->parent;
1941 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1942 		oldmask = READ_ONCE(rnp_c->qsmask);
1943 	}
1944 
1945 	/*
1946 	 * Get here if we are the last CPU to pass through a quiescent
1947 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1948 	 * to clean up and start the next grace period if one is needed.
1949 	 */
1950 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1951 }
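
/*
 * A hedged worked example of the walk above, assuming a two-level tree
 * with 16-CPU leaves: when CPU 3 reports the last outstanding quiescent
 * state for its leaf, the loop clears bit 3 in that leaf's ->qsmask,
 * sees the mask reach zero, and retries one level up with mask set to
 * the leaf's ->grpmask, clearing the leaf's bit in the root's ->qsmask.
 * Only when the root's ->qsmask also reaches zero (and no readers are
 * blocking the grace period) does rcu_report_qs_rsp() end the grace
 * period.
 */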
1952 
1953 /*
1954  * Record a quiescent state for all tasks that were previously queued
1955  * on the specified rcu_node structure and that were blocking the current
1956  * RCU grace period.  The caller must hold the corresponding rnp->lock with
1957  * irqs disabled, and this lock is released upon return, but irqs remain
1958  * disabled.
1959  */
1960 static void __maybe_unused
1961 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1962 	__releases(rnp->lock)
1963 {
1964 	unsigned long gps;
1965 	unsigned long mask;
1966 	struct rcu_node *rnp_p;
1967 
1968 	raw_lockdep_assert_held_rcu_node(rnp);
1969 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1970 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1971 	    rnp->qsmask != 0) {
1972 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1973 		return;  /* Still need more quiescent states! */
1974 	}
1975 
1976 	rnp->completedqs = rnp->gp_seq;
1977 	rnp_p = rnp->parent;
1978 	if (rnp_p == NULL) {
1979 		/*
1980 		 * Only one rcu_node structure in the tree, so don't
1981 		 * try to report up to its nonexistent parent!
1982 		 */
1983 		rcu_report_qs_rsp(flags);
1984 		return;
1985 	}
1986 
1987 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1988 	gps = rnp->gp_seq;
1989 	mask = rnp->grpmask;
1990 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
1991 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
1992 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1993 }
1994 
1995 /*
1996  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1997  * structure.  This must be called from the specified CPU.
1998  */
1999 static void
2000 rcu_report_qs_rdp(struct rcu_data *rdp)
2001 {
2002 	unsigned long flags;
2003 	unsigned long mask;
2004 	bool needwake = false;
2005 	bool needacc = false;
2006 	struct rcu_node *rnp;
2007 
2008 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2009 	rnp = rdp->mynode;
2010 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2011 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2012 	    rdp->gpwrap) {
2013 
2014 		/*
2015 		 * The grace period in which this quiescent state was
2016 		 * recorded has ended, so don't report it upwards.
2017 		 * We will instead need a new quiescent state that lies
2018 		 * within the current grace period.
2019 		 */
2020 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
2021 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2022 		return;
2023 	}
2024 	mask = rdp->grpmask;
2025 	rdp->core_needs_qs = false;
2026 	if ((rnp->qsmask & mask) == 0) {
2027 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2028 	} else {
2029 		/*
2030 		 * This GP can't end until this CPU checks in, so all of our
2031 		 * callbacks can be processed during the next GP.
2032 		 *
2033 		 * NOCB kthreads have their own way to deal with that...
2034 		 */
2035 		if (!rcu_rdp_is_offloaded(rdp)) {
2036 			needwake = rcu_accelerate_cbs(rnp, rdp);
2037 		} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2038 			/*
2039 			 * ...but NOCB kthreads may miss or delay callback acceleration
2040 			 * if in the middle of a (de-)offloading process.
2041 			 */
2042 			needacc = true;
2043 		}
2044 
2045 		rcu_disable_urgency_upon_qs(rdp);
2046 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2047 		/* ^^^ Released rnp->lock */
2048 		if (needwake)
2049 			rcu_gp_kthread_wake();
2050 
2051 		if (needacc) {
2052 			rcu_nocb_lock_irqsave(rdp, flags);
2053 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2054 			rcu_nocb_unlock_irqrestore(rdp, flags);
2055 		}
2056 	}
2057 }
2058 
2059 /*
2060  * Check to see if there is a new grace period of which this CPU
2061  * is not yet aware, and if so, set up local rcu_data state for it.
2062  * Otherwise, see if this CPU has just passed through its first
2063  * quiescent state for this grace period, and record that fact if so.
2064  */
2065 static void
2066 rcu_check_quiescent_state(struct rcu_data *rdp)
2067 {
2068 	/* Check for grace-period ends and beginnings. */
2069 	note_gp_changes(rdp);
2070 
2071 	/*
2072 	 * Does this CPU still need to do its part for the current grace period?
2073 	 * If not, return and let the other CPUs do their part as well.
2074 	 */
2075 	if (!rdp->core_needs_qs)
2076 		return;
2077 
2078 	/*
2079 	 * Was there a quiescent state since the beginning of the grace
2080 	 * period? If no, then exit and wait for the next call.
2081 	 * period? If not, then exit and wait for the next call.
2082 	if (rdp->cpu_no_qs.b.norm)
2083 		return;
2084 
2085 	/*
2086 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2087 	 * judge of that).
2088 	 */
2089 	rcu_report_qs_rdp(rdp);
2090 }
2091 
2092 /*
2093  * Near the end of the offline process.  Trace the fact that this CPU
2094  * is going offline.
2095  */
2096 int rcutree_dying_cpu(unsigned int cpu)
2097 {
2098 	bool blkd;
2099 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2100 	struct rcu_node *rnp = rdp->mynode;
2101 
2102 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2103 		return 0;
2104 
2105 	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
2106 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2107 			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2108 	return 0;
2109 }
2110 
2111 /*
2112  * All CPUs for the specified rcu_node structure have gone offline,
2113  * and all tasks that were preempted within an RCU read-side critical
2114  * section while running on one of those CPUs have since exited their RCU
2115  * read-side critical section.  Some other CPU is reporting this fact with
2116  * the specified rcu_node structure's ->lock held and interrupts disabled.
2117  * This function therefore goes up the tree of rcu_node structures,
2118  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2119  * the leaf rcu_node structure's ->qsmaskinit field has already been
2120  * updated.
2121  *
2122  * This function does check that the specified rcu_node structure has
2123  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2124  * prematurely.  That said, invoking it after the fact will cost you
2125  * a needless lock acquisition.  So once it has done its work, don't
2126  * invoke it again.
2127  */
2128 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2129 {
2130 	long mask;
2131 	struct rcu_node *rnp = rnp_leaf;
2132 
2133 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
2134 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2135 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2136 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2137 		return;
2138 	for (;;) {
2139 		mask = rnp->grpmask;
2140 		rnp = rnp->parent;
2141 		if (!rnp)
2142 			break;
2143 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2144 		rnp->qsmaskinit &= ~mask;
2145 		/* Between grace periods, so better already be zero! */
2146 		WARN_ON_ONCE(rnp->qsmask);
2147 		if (rnp->qsmaskinit) {
2148 			raw_spin_unlock_rcu_node(rnp);
2149 			/* irqs remain disabled. */
2150 			return;
2151 		}
2152 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2153 	}
2154 }
2155 
2156 /*
2157  * The CPU has been completely removed, and some other CPU is reporting
2158  * this fact from process context.  Do the remainder of the cleanup.
2159  * There can only be one CPU hotplug operation at a time, so no need for
2160  * explicit locking.
2161  */
2162 int rcutree_dead_cpu(unsigned int cpu)
2163 {
2164 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2165 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2166 
2167 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2168 		return 0;
2169 
2170 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2171 	/* Adjust any no-longer-needed kthreads. */
2172 	rcu_boost_kthread_setaffinity(rnp, -1);
2173 	// Stop-machine done, so allow nohz_full to disable tick.
2174 	tick_dep_clear(TICK_DEP_BIT_RCU);
2175 	return 0;
2176 }
2177 
2178 /*
2179  * Invoke any RCU callbacks that have made it to the end of their grace
2180  * period.  Throttle as specified by rdp->blimit.
2181  */
2182 static void rcu_do_batch(struct rcu_data *rdp)
2183 {
2184 	int div;
2185 	bool __maybe_unused empty;
2186 	unsigned long flags;
2187 	struct rcu_head *rhp;
2188 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2189 	long bl, count = 0;
2190 	long pending, tlimit = 0;
2191 
2192 	/* If no callbacks are ready, just return. */
2193 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2194 		trace_rcu_batch_start(rcu_state.name,
2195 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2196 		trace_rcu_batch_end(rcu_state.name, 0,
2197 				    !rcu_segcblist_empty(&rdp->cblist),
2198 				    need_resched(), is_idle_task(current),
2199 				    rcu_is_callbacks_kthread(rdp));
2200 		return;
2201 	}
2202 
2203 	/*
2204 	 * Extract the list of ready callbacks, disabling IRQs to prevent
2205 	 * races with call_rcu() from interrupt handlers.  Leave the
2206 	 * callback counts, as rcu_barrier() needs to be conservative.
2207 	 */
2208 	rcu_nocb_lock_irqsave(rdp, flags);
2209 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2210 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
2211 	div = READ_ONCE(rcu_divisor);
2212 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2213 	bl = max(rdp->blimit, pending >> div);
2214 	if (in_serving_softirq() && unlikely(bl > 100)) {
2215 		long rrn = READ_ONCE(rcu_resched_ns);
2216 
2217 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2218 		tlimit = local_clock() + rrn;
2219 	}
2220 	trace_rcu_batch_start(rcu_state.name,
2221 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2222 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2223 	if (rcu_rdp_is_offloaded(rdp))
2224 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2225 
2226 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2227 	rcu_nocb_unlock_irqrestore(rdp, flags);
2228 
2229 	/* Invoke callbacks. */
2230 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2231 	rhp = rcu_cblist_dequeue(&rcl);
2232 
2233 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2234 		rcu_callback_t f;
2235 
2236 		count++;
2237 		debug_rcu_head_unqueue(rhp);
2238 
2239 		rcu_lock_acquire(&rcu_callback_map);
2240 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2241 
2242 		f = rhp->func;
2243 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2244 		f(rhp);
2245 
2246 		rcu_lock_release(&rcu_callback_map);
2247 
2248 		/*
2249 		 * Stop only if limit reached and CPU has something to do.
2250 		 */
2251 		if (in_serving_softirq()) {
2252 			if (count >= bl && (need_resched() || !is_idle_task(current)))
2253 				break;
2254 			/*
2255 			 * Make sure we don't spend too much time here and deprive other
2256 			 * softirq vectors of CPU cycles.
2257 			 */
2258 			if (unlikely(tlimit)) {
2259 				/* only call local_clock() every 32 callbacks */
2260 				if (likely((count & 31) || local_clock() < tlimit))
2261 					continue;
2262 				/* Exceeded the time limit, so leave. */
2263 				break;
2264 			}
2265 		} else {
2266 			local_bh_enable();
2267 			lockdep_assert_irqs_enabled();
2268 			cond_resched_tasks_rcu_qs();
2269 			lockdep_assert_irqs_enabled();
2270 			local_bh_disable();
2271 		}
2272 	}
2273 
2274 	rcu_nocb_lock_irqsave(rdp, flags);
2275 	rdp->n_cbs_invoked += count;
2276 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2277 			    is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2278 
2279 	/* Update counts and requeue any remaining callbacks. */
2280 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2281 	rcu_segcblist_add_len(&rdp->cblist, -count);
2282 
2283 	/* Reinstate batch limit if we have worked down the excess. */
2284 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2285 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2286 		rdp->blimit = blimit;
2287 
2288 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2289 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2290 		rdp->qlen_last_fqs_check = 0;
2291 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2292 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2293 		rdp->qlen_last_fqs_check = count;
2294 
2295 	/*
2296 	 * The following usually indicates a double call_rcu().  To track
2297 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2298 	 */
2299 	empty = rcu_segcblist_empty(&rdp->cblist);
2300 	WARN_ON_ONCE(count == 0 && !empty);
2301 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2302 		     count != 0 && empty);
2303 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2304 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2305 
2306 	rcu_nocb_unlock_irqrestore(rdp, flags);
2307 
2308 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2309 }
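
/*
 * A worked example of the batch-limit arithmetic above, assuming the
 * default rcu_divisor of 7 and the default ->blimit of 10: with 25600
 * callbacks pending, bl = max(10, 25600 >> 7) = 200, so a softirq-context
 * invocation of rcu_do_batch() processes at most 200 callbacks before
 * yielding (unless tlimit expires first).
 */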
2310 
2311 /*
2312  * This function is invoked from each scheduling-clock interrupt,
2313  * and checks to see if this CPU is in a non-context-switch quiescent
2314  * state, for example, user mode or idle loop.  It also schedules RCU
2315  * core processing.  If the current grace period has gone on too long,
2316  * it will ask the scheduler to manufacture a context switch for the sole
2317  * purpose of providing the needed quiescent state.
2318  */
2319 void rcu_sched_clock_irq(int user)
2320 {
2321 	unsigned long j;
2322 
2323 	if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2324 		j = jiffies;
2325 		WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2326 		__this_cpu_write(rcu_data.last_sched_clock, j);
2327 	}
2328 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2329 	lockdep_assert_irqs_disabled();
2330 	raw_cpu_inc(rcu_data.ticks_this_gp);
2331 	/* The load-acquire pairs with the store-release setting to true. */
2332 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2333 		/* Idle and userspace execution already are quiescent states. */
2334 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2335 			set_tsk_need_resched(current);
2336 			set_preempt_need_resched();
2337 		}
2338 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2339 	}
2340 	rcu_flavor_sched_clock_irq(user);
2341 	if (rcu_pending(user))
2342 		invoke_rcu_core();
2343 	if (user || rcu_is_cpu_rrupt_from_idle())
2344 		rcu_note_voluntary_context_switch(current);
2345 	lockdep_assert_irqs_disabled();
2346 
2347 	trace_rcu_utilization(TPS("End scheduler-tick"));
2348 }
2349 
2350 /*
2351  * Scan the leaf rcu_node structures.  For each structure on which all
2352  * CPUs have reported a quiescent state and on which there are tasks
2353  * blocking the current grace period, initiate RCU priority boosting.
2354  * Otherwise, invoke the specified function to check dyntick state for
2355  * each CPU that has not yet reported a quiescent state.
2356  */
2357 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2358 {
2359 	int cpu;
2360 	unsigned long flags;
2361 	unsigned long mask;
2362 	struct rcu_data *rdp;
2363 	struct rcu_node *rnp;
2364 
2365 	rcu_state.cbovld = rcu_state.cbovldnext;
2366 	rcu_state.cbovldnext = false;
2367 	rcu_for_each_leaf_node(rnp) {
2368 		cond_resched_tasks_rcu_qs();
2369 		mask = 0;
2370 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2371 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2372 		if (rnp->qsmask == 0) {
2373 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2374 				/*
2375 				 * No point in scanning bits because they
2376 				 * are all zero.  But we might need to
2377 				 * priority-boost blocked readers.
2378 				 */
2379 				rcu_initiate_boost(rnp, flags);
2380 				/* rcu_initiate_boost() releases rnp->lock */
2381 				continue;
2382 			}
2383 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2384 			continue;
2385 		}
2386 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2387 			rdp = per_cpu_ptr(&rcu_data, cpu);
2388 			if (f(rdp)) {
2389 				mask |= rdp->grpmask;
2390 				rcu_disable_urgency_upon_qs(rdp);
2391 			}
2392 		}
2393 		if (mask != 0) {
2394 			/* Idle/offline CPUs, report (releases rnp->lock). */
2395 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2396 		} else {
2397 			/* Nothing to do here, so just drop the lock. */
2398 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2399 		}
2400 	}
2401 }
2402 
2403 /*
2404  * Force quiescent states on reluctant CPUs, and also detect which
2405  * CPUs are in dyntick-idle mode.
2406  */
2407 void rcu_force_quiescent_state(void)
2408 {
2409 	unsigned long flags;
2410 	bool ret;
2411 	struct rcu_node *rnp;
2412 	struct rcu_node *rnp_old = NULL;
2413 
2414 	/* Funnel through hierarchy to reduce memory contention. */
2415 	rnp = raw_cpu_read(rcu_data.mynode);
2416 	for (; rnp != NULL; rnp = rnp->parent) {
2417 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2418 		       !raw_spin_trylock(&rnp->fqslock);
2419 		if (rnp_old != NULL)
2420 			raw_spin_unlock(&rnp_old->fqslock);
2421 		if (ret)
2422 			return;
2423 		rnp_old = rnp;
2424 	}
2425 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2426 
2427 	/* Reached the root of the rcu_node tree, acquire lock. */
2428 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2429 	raw_spin_unlock(&rnp_old->fqslock);
2430 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2431 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2432 		return;  /* Someone beat us to it. */
2433 	}
2434 	WRITE_ONCE(rcu_state.gp_flags,
2435 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2436 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2437 	rcu_gp_kthread_wake();
2438 }
2439 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
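
/*
 * An illustrative view of the funnel locking above, assuming 64 CPUs and
 * a fanout of 16: concurrent callers first contend on one of four leaf
 * ->fqslock locks, and only trylock winners advance toward the root, so
 * few tasks ever touch the root rcu_node structure and at most one of
 * them actually sets RCU_GP_FLAG_FQS for a given FQS request.
 */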
2440 
2441 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2442 // grace periods.
2443 static void strict_work_handler(struct work_struct *work)
2444 {
2445 	rcu_read_lock();
2446 	rcu_read_unlock();
2447 }
2448 
2449 /* Perform RCU core processing work for the current CPU.  */
2450 static __latent_entropy void rcu_core(void)
2451 {
2452 	unsigned long flags;
2453 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2454 	struct rcu_node *rnp = rdp->mynode;
2455 	/*
2456 	 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2457 	 * Therefore this function can race with concurrent NOCB (de-)offloading
2458 	 * on this CPU and the below condition must be considered volatile.
2459 	 * However if we race with:
2460 	 *
2461 	 * _ Offloading:   In the worst case we accelerate or process callbacks
2462 	 *                 concurrently with NOCB kthreads. We are guaranteed to
2463 	 *                 call rcu_nocb_lock() if that happens.
2464 	 *
2465 	 * _ Deoffloading: In the worst case we miss callback acceleration or
2466 	 *                 processing. This is fine because the early stage
2467 	 *                 of deoffloading invokes rcu_core() after setting
2468 	 *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2469 	 *                 what could have been dismissed without the need to wait
2470 	 *                 for the next rcu_pending() check in the next jiffy.
2471 	 */
2472 	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2473 
2474 	if (cpu_is_offline(smp_processor_id()))
2475 		return;
2476 	trace_rcu_utilization(TPS("Start RCU core"));
2477 	WARN_ON_ONCE(!rdp->beenonline);
2478 
2479 	/* Report any deferred quiescent states if preemption enabled. */
2480 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2481 		rcu_preempt_deferred_qs(current);
2482 	} else if (rcu_preempt_need_deferred_qs(current)) {
2483 		set_tsk_need_resched(current);
2484 		set_preempt_need_resched();
2485 	}
2486 
2487 	/* Update RCU state based on any recent quiescent states. */
2488 	rcu_check_quiescent_state(rdp);
2489 
2490 	/* No grace period and unregistered callbacks? */
2491 	if (!rcu_gp_in_progress() &&
2492 	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2493 		rcu_nocb_lock_irqsave(rdp, flags);
2494 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2495 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2496 		rcu_nocb_unlock_irqrestore(rdp, flags);
2497 	}
2498 
2499 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2500 
2501 	/* If there are callbacks ready, invoke them. */
2502 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2503 	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
2504 		rcu_do_batch(rdp);
2505 		/* Re-invoke RCU core processing if there are callbacks remaining. */
2506 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
2507 			invoke_rcu_core();
2508 	}
2509 
2510 	/* Do any needed deferred wakeups of rcuo kthreads. */
2511 	do_nocb_deferred_wakeup(rdp);
2512 	trace_rcu_utilization(TPS("End RCU core"));
2513 
2514 	// If strict GPs, schedule an RCU reader in a clean environment.
2515 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2516 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2517 }
2518 
2519 static void rcu_core_si(struct softirq_action *h)
2520 {
2521 	rcu_core();
2522 }
2523 
2524 static void rcu_wake_cond(struct task_struct *t, int status)
2525 {
2526 	/*
2527 	 * If the thread is yielding, only wake it when this
2528 	 * is invoked from idle.
2529 	 */
2530 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2531 		wake_up_process(t);
2532 }
2533 
2534 static void invoke_rcu_core_kthread(void)
2535 {
2536 	struct task_struct *t;
2537 	unsigned long flags;
2538 
2539 	local_irq_save(flags);
2540 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2541 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2542 	if (t != NULL && t != current)
2543 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2544 	local_irq_restore(flags);
2545 }
2546 
2547 /*
2548  * Wake up this CPU's rcuc kthread to do RCU core processing.
2549  */
2550 static void invoke_rcu_core(void)
2551 {
2552 	if (!cpu_online(smp_processor_id()))
2553 		return;
2554 	if (use_softirq)
2555 		raise_softirq(RCU_SOFTIRQ);
2556 	else
2557 		invoke_rcu_core_kthread();
2558 }
2559 
2560 static void rcu_cpu_kthread_park(unsigned int cpu)
2561 {
2562 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2563 }
2564 
2565 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2566 {
2567 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2568 }
2569 
2570 /*
2571  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2572  * the RCU softirq used in configurations of RCU that do not support RCU
2573  * priority boosting.
2574  */
2575 static void rcu_cpu_kthread(unsigned int cpu)
2576 {
2577 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2578 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2579 	unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2580 	int spincnt;
2581 
2582 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2583 	for (spincnt = 0; spincnt < 10; spincnt++) {
2584 		WRITE_ONCE(*j, jiffies);
2585 		local_bh_disable();
2586 		*statusp = RCU_KTHREAD_RUNNING;
2587 		local_irq_disable();
2588 		work = *workp;
2589 		*workp = 0;
2590 		local_irq_enable();
2591 		if (work)
2592 			rcu_core();
2593 		local_bh_enable();
2594 		if (*workp == 0) {
2595 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2596 			*statusp = RCU_KTHREAD_WAITING;
2597 			return;
2598 		}
2599 	}
2600 	*statusp = RCU_KTHREAD_YIELDING;
2601 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2602 	schedule_timeout_idle(2);
2603 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2604 	*statusp = RCU_KTHREAD_WAITING;
2605 	WRITE_ONCE(*j, jiffies);
2606 }
2607 
2608 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2609 	.store			= &rcu_data.rcu_cpu_kthread_task,
2610 	.thread_should_run	= rcu_cpu_kthread_should_run,
2611 	.thread_fn		= rcu_cpu_kthread,
2612 	.thread_comm		= "rcuc/%u",
2613 	.setup			= rcu_cpu_kthread_setup,
2614 	.park			= rcu_cpu_kthread_park,
2615 };
2616 
2617 /*
2618  * Spawn per-CPU RCU core processing kthreads.
2619  */
2620 static int __init rcu_spawn_core_kthreads(void)
2621 {
2622 	int cpu;
2623 
2624 	for_each_possible_cpu(cpu)
2625 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2626 	if (use_softirq)
2627 		return 0;
2628 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2629 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2630 	return 0;
2631 }
2632 
2633 /*
2634  * Handle any core-RCU processing required by a call_rcu() invocation.
2635  */
2636 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2637 			    unsigned long flags)
2638 {
2639 	/*
2640 	 * If called from an extended quiescent state, invoke the RCU
2641 	 * core in order to force a re-evaluation of RCU's idleness.
2642 	 */
2643 	if (!rcu_is_watching())
2644 		invoke_rcu_core();
2645 
2646 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2647 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2648 		return;
2649 
2650 	/*
2651 	 * Force the grace period if too many callbacks or too long waiting.
2652 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2653 	 * if some other CPU has recently done so.  Also, don't bother
2654 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2655 	 * is the only one waiting for a grace period to complete.
2656 	 */
2657 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2658 		     rdp->qlen_last_fqs_check + qhimark)) {
2659 
2660 		/* Are we ignoring a completed grace period? */
2661 		note_gp_changes(rdp);
2662 
2663 		/* Start a new grace period if one not already started. */
2664 		if (!rcu_gp_in_progress()) {
2665 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2666 		} else {
2667 			/* Give the grace period a kick. */
2668 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2669 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2670 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2671 				rcu_force_quiescent_state();
2672 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2673 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2674 		}
2675 	}
2676 }
2677 
2678 /*
2679  * RCU callback function to leak a callback.
2680  */
2681 static void rcu_leak_callback(struct rcu_head *rhp)
2682 {
2683 }
2684 
2685 /*
2686  * Check and if necessary update the leaf rcu_node structure's
2687  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2688  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2689  * structure's ->lock.
2690  */
2691 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2692 {
2693 	raw_lockdep_assert_held_rcu_node(rnp);
2694 	if (qovld_calc <= 0)
2695 		return; // Early boot and wildcard value set.
2696 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2697 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2698 	else
2699 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2700 }
2701 
2702 /*
2703  * Check and if necessary update the leaf rcu_node structure's
2704  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2705  * number of queued RCU callbacks.  No locks need be held, but the
2706  * caller must have disabled interrupts.
2707  *
2708  * Note that this function ignores the possibility that there are a lot
2709  * of callbacks all of which have already seen the end of their respective
2710  * grace periods.  This omission is due to no-CBs CPUs needing to hold
2711  * ->nocb_lock to do this check, which is too heavy for a common-case
2712  * operation.
2713  */
2714 static void check_cb_ovld(struct rcu_data *rdp)
2715 {
2716 	struct rcu_node *const rnp = rdp->mynode;
2717 
2718 	if (qovld_calc <= 0 ||
2719 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2720 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2721 		return; // Early boot wildcard value or already set correctly.
2722 	raw_spin_lock_rcu_node(rnp);
2723 	check_cb_ovld_locked(rdp, rnp);
2724 	raw_spin_unlock_rcu_node(rnp);
2725 }
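
/*
 * Worked example, assuming the default qovld_calc of twice qhimark
 * (2 * 10000): once a CPU has more than 20000 callbacks queued, its bit
 * is set in the leaf's ->cbovldmask, force_qs_rnp() then propagates that
 * into rcu_state.cbovld, and rcu_gp_fqs_loop() shortens its waits until
 * the backlog drains back below the threshold.
 */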
2726 
2727 static void
2728 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy)
2729 {
2730 	static atomic_t doublefrees;
2731 	unsigned long flags;
2732 	struct rcu_data *rdp;
2733 	bool was_alldone;
2734 
2735 	/* Misaligned rcu_head! */
2736 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2737 
2738 	if (debug_rcu_head_queue(head)) {
2739 		/*
2740 		 * Probable double call_rcu(), so leak the callback.
2741 		 * Use the rcu:rcu_callback trace event to find the previous
2742 		 * time this callback was passed to call_rcu().
2743 		 */
2744 		if (atomic_inc_return(&doublefrees) < 4) {
2745 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
2746 			mem_dump_obj(head);
2747 		}
2748 		WRITE_ONCE(head->func, rcu_leak_callback);
2749 		return;
2750 	}
2751 	head->func = func;
2752 	head->next = NULL;
2753 	kasan_record_aux_stack_noalloc(head);
2754 	local_irq_save(flags);
2755 	rdp = this_cpu_ptr(&rcu_data);
2756 
2757 	/* Add the callback to our list. */
2758 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2759 		// This can trigger due to call_rcu() from offline CPU:
2760 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2761 		WARN_ON_ONCE(!rcu_is_watching());
2762 		// Very early boot, before rcu_init().  Initialize if needed
2763 		// and then drop through to queue the callback.
2764 		if (rcu_segcblist_empty(&rdp->cblist))
2765 			rcu_segcblist_init(&rdp->cblist);
2766 	}
2767 
2768 	check_cb_ovld(rdp);
2769 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2770 		return; // Enqueued onto ->nocb_bypass, so just leave.
2771 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2772 	rcu_segcblist_enqueue(&rdp->cblist, head);
2773 	if (__is_kvfree_rcu_offset((unsigned long)func))
2774 		trace_rcu_kvfree_callback(rcu_state.name, head,
2775 					 (unsigned long)func,
2776 					 rcu_segcblist_n_cbs(&rdp->cblist));
2777 	else
2778 		trace_rcu_callback(rcu_state.name, head,
2779 				   rcu_segcblist_n_cbs(&rdp->cblist));
2780 
2781 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2782 
2783 	/* Go handle any RCU core processing required. */
2784 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2785 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2786 	} else {
2787 		__call_rcu_core(rdp, head, flags);
2788 		local_irq_restore(flags);
2789 	}
2790 }
2791 
2792 #ifdef CONFIG_RCU_LAZY
2793 /**
2794  * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2795  * flush all lazy callbacks (including the new one) to the main ->cblist while
2796  * doing so.
2797  *
2798  * @head: structure to be used for queueing the RCU updates.
2799  * @func: actual callback function to be invoked after the grace period
2800  *
2801  * The callback function will be invoked some time after a full grace
2802  * period elapses, in other words after all pre-existing RCU read-side
2803  * critical sections have completed.
2804  *
2805  * Use this API instead of call_rcu() if you don't want the callback to be
2806  * invoked after very long periods of time, which can happen on systems without
2807  * memory pressure and on systems which are lightly loaded or mostly idle.
2808  * This function will cause callbacks to be invoked sooner than later at the
2809  * This function will cause callbacks to be invoked sooner rather than later,
2810  * at the expense of extra power. Other than that, this function is identical
2811  * to, and reuses, call_rcu()'s logic. Refer to call_rcu() for more details
2812  * about memory ordering and other functionality.
2813 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2814 {
2815 	return __call_rcu_common(head, func, false);
2816 }
2817 EXPORT_SYMBOL_GPL(call_rcu_hurry);
2818 #endif
2819 
2820 /**
2821  * call_rcu() - Queue an RCU callback for invocation after a grace period.
2822  * By default the callbacks are 'lazy' and are kept hidden from the main
2823  * ->cblist to prevent grace periods from starting too soon.
2824  * If you need grace periods to start promptly, use call_rcu_hurry().
2825  *
2826  * @head: structure to be used for queueing the RCU updates.
2827  * @func: actual callback function to be invoked after the grace period
2828  *
2829  * The callback function will be invoked some time after a full grace
2830  * period elapses, in other words after all pre-existing RCU read-side
2831  * critical sections have completed.  However, the callback function
2832  * might well execute concurrently with RCU read-side critical sections
2833  * that started after call_rcu() was invoked.
2834  *
2835  * RCU read-side critical sections are delimited by rcu_read_lock()
2836  * and rcu_read_unlock(), and may be nested.  In addition, but only in
2837  * v5.0 and later, regions of code across which interrupts, preemption,
2838  * or softirqs have been disabled also serve as RCU read-side critical
2839  * sections.  This includes hardware interrupt handlers, softirq handlers,
2840  * and NMI handlers.
2841  *
2842  * Note that all CPUs must agree that the grace period extended beyond
2843  * all pre-existing RCU read-side critical sections.  On systems with more
2844  * than one CPU, this means that when "func()" is invoked, each CPU is
2845  * guaranteed to have executed a full memory barrier since the end of its
2846  * last RCU read-side critical section whose beginning preceded the call
2847  * to call_rcu().  It also means that each CPU executing an RCU read-side
2848  * critical section that continues beyond the start of "func()" must have
2849  * executed a memory barrier after the call_rcu() but before the beginning
2850  * of that RCU read-side critical section.  Note that these guarantees
2851  * include CPUs that are offline, idle, or executing in user mode, as
2852  * well as CPUs that are executing in the kernel.
2853  *
2854  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2855  * resulting RCU callback function "func()", then both CPU A and CPU B are
2856  * guaranteed to execute a full memory barrier during the time interval
2857  * between the call to call_rcu() and the invocation of "func()" -- even
2858  * if CPU A and CPU B are the same CPU (but again only if the system has
2859  * more than one CPU).
2860  *
2861  * Implementation of these memory-ordering guarantees is described here:
2862  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2863  */
2864 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2865 {
2866 	return __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
2867 }
2868 EXPORT_SYMBOL_GPL(call_rcu);
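
/*
 * A classic call_rcu() usage sketch, adapted from Documentation/RCU (the
 * "struct foo" names are illustrative only): embed an rcu_head in the
 * RCU-protected structure and use container_of() in the callback:
 *
 *	struct foo {
 *		struct list_head list;
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	list_del_rcu(&fp->list);		// Hide fp from new readers.
 *	call_rcu(&fp->rcu, foo_reclaim);	// Free once readers are done.
 */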
2869 
2870 /* Maximum number of jiffies to wait before draining a batch. */
2871 #define KFREE_DRAIN_JIFFIES (5 * HZ)
2872 #define KFREE_N_BATCHES 2
2873 #define FREE_N_CHANNELS 2
2874 
2875 /**
2876  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2877  * @nr_records: Number of active pointers in the array
2878  * @next: Next bulk object in the block chain
2879  * @records: Array of the kvfree_rcu() pointers
2880  */
2881 struct kvfree_rcu_bulk_data {
2882 	unsigned long nr_records;
2883 	struct kvfree_rcu_bulk_data *next;
2884 	void *records[];
2885 };
2886 
2887 /*
2888  * This macro defines how many entries the "records" array
2889  * will contain.  It is chosen so that the size of the
2890  * kvfree_rcu_bulk_data structure is exactly one page.
2891  */
2892 #define KVFREE_BULK_MAX_ENTR \
2893 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
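
/*
 * Hedged worked example: on a 64-bit system with 4096-byte pages, the
 * header above (nr_records plus next) occupies 16 bytes, so
 * KVFREE_BULK_MAX_ENTR works out to (4096 - 16) / 8 == 510 pointers
 * per block.
 */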
2894 
2895 /**
2896  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2897  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2898  * @head_free: List of kfree_rcu() objects waiting for a grace period
2899  * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2900  * @krcp: Pointer to @kfree_rcu_cpu structure
2901  */
2902 
2903 struct kfree_rcu_cpu_work {
2904 	struct rcu_work rcu_work;
2905 	struct rcu_head *head_free;
2906 	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
2907 	struct kfree_rcu_cpu *krcp;
2908 };
2909 
2910 /**
2911  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2912  * @head: List of kfree_rcu() objects not yet waiting for a grace period
2913  * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2914  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2915  * @lock: Synchronize access to this structure
2916  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2917  * @initialized: The @rcu_work fields have been initialized
2918  * @count: Number of objects for which GP not started
2919  * @bkvcache:
2920  *	A simple cache list that contains objects for reuse purpose.
2921  *	A simple cache list that contains objects for reuse.
2922  *	In order to save some per-cpu space the list is singly linked.
2923  *	Even though it is lockless, access to it has to be protected
2924  *	by the per-cpu lock.
2925  * @backoff_page_cache_fill: Delay cache refills
2926  * @work_in_progress: Indicates that page_cache_work is running
2927  * @hrtimer: A hrtimer for scheduling a page_cache_work
2928  * @nr_bkv_objs: number of allocated objects at @bkvcache.
2929  * @nr_bkv_objs: Number of allocated objects in @bkvcache.
2930  * This is a per-CPU structure.  The reason that it is not included in
2931  * the rcu_data structure is to permit this code to be extracted from
2932  * the RCU files.  Such extraction could allow further optimization of
2933  * the interactions with the slab allocators.
2934  */
2935 struct kfree_rcu_cpu {
2936 	struct rcu_head *head;
2937 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
2938 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2939 	raw_spinlock_t lock;
2940 	struct delayed_work monitor_work;
2941 	bool initialized;
2942 	int count;
2943 
2944 	struct delayed_work page_cache_work;
2945 	atomic_t backoff_page_cache_fill;
2946 	atomic_t work_in_progress;
2947 	struct hrtimer hrtimer;
2948 
2949 	struct llist_head bkvcache;
2950 	int nr_bkv_objs;
2951 };
2952 
2953 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2954 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2955 };
2956 
2957 static __always_inline void
2958 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2959 {
2960 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2961 	int i;
2962 
2963 	for (i = 0; i < bhead->nr_records; i++)
2964 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2965 #endif
2966 }
2967 
2968 static inline struct kfree_rcu_cpu *
2969 krc_this_cpu_lock(unsigned long *flags)
2970 {
2971 	struct kfree_rcu_cpu *krcp;
2972 
2973 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
2974 	krcp = this_cpu_ptr(&krc);
2975 	raw_spin_lock(&krcp->lock);
2976 
2977 	return krcp;
2978 }
2979 
2980 static inline void
2981 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2982 {
2983 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2984 }
2985 
2986 static inline struct kvfree_rcu_bulk_data *
2987 get_cached_bnode(struct kfree_rcu_cpu *krcp)
2988 {
2989 	if (!krcp->nr_bkv_objs)
2990 		return NULL;
2991 
2992 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2993 	return (struct kvfree_rcu_bulk_data *)
2994 		llist_del_first(&krcp->bkvcache);
2995 }
2996 
2997 static inline bool
2998 put_cached_bnode(struct kfree_rcu_cpu *krcp,
2999 	struct kvfree_rcu_bulk_data *bnode)
3000 {
3001 	// Check the limit.
3002 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3003 		return false;
3004 
3005 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3006 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
3007 	return true;
3008 }
3009 
3010 static int
3011 drain_page_cache(struct kfree_rcu_cpu *krcp)
3012 {
3013 	unsigned long flags;
3014 	struct llist_node *page_list, *pos, *n;
3015 	int freed = 0;
3016 
3017 	raw_spin_lock_irqsave(&krcp->lock, flags);
3018 	page_list = llist_del_all(&krcp->bkvcache);
3019 	WRITE_ONCE(krcp->nr_bkv_objs, 0);
3020 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3021 
3022 	llist_for_each_safe(pos, n, page_list) {
3023 		free_page((unsigned long)pos);
3024 		freed++;
3025 	}
3026 
3027 	return freed;
3028 }
3029 
3030 /*
3031  * This function is invoked in workqueue context after a grace period.
3032  * It frees all the objects queued on ->bkvhead_free or ->head_free.
3033  */
3034 static void kfree_rcu_work(struct work_struct *work)
3035 {
3036 	unsigned long flags;
3037 	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3038 	struct rcu_head *head, *next;
3039 	struct kfree_rcu_cpu *krcp;
3040 	struct kfree_rcu_cpu_work *krwp;
3041 	int i, j;
3042 
3043 	krwp = container_of(to_rcu_work(work),
3044 			    struct kfree_rcu_cpu_work, rcu_work);
3045 	krcp = krwp->krcp;
3046 
3047 	raw_spin_lock_irqsave(&krcp->lock, flags);
3048 	// Channels 1 and 2.
3049 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3050 		bkvhead[i] = krwp->bkvhead_free[i];
3051 		krwp->bkvhead_free[i] = NULL;
3052 	}
3053 
3054 	// Channel 3.
3055 	head = krwp->head_free;
3056 	krwp->head_free = NULL;
3057 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3058 
3059 	// Handle the first two channels.
3060 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3061 		for (; bkvhead[i]; bkvhead[i] = bnext) {
3062 			bnext = bkvhead[i]->next;
3063 			debug_rcu_bhead_unqueue(bkvhead[i]);
3064 
3065 			rcu_lock_acquire(&rcu_callback_map);
3066 			if (i == 0) { // kmalloc() / kfree().
3067 				trace_rcu_invoke_kfree_bulk_callback(
3068 					rcu_state.name, bkvhead[i]->nr_records,
3069 					bkvhead[i]->records);
3070 
3071 				kfree_bulk(bkvhead[i]->nr_records,
3072 					bkvhead[i]->records);
3073 			} else { // vmalloc() / vfree().
3074 				for (j = 0; j < bkvhead[i]->nr_records; j++) {
3075 					trace_rcu_invoke_kvfree_callback(
3076 						rcu_state.name,
3077 						bkvhead[i]->records[j], 0);
3078 
3079 					vfree(bkvhead[i]->records[j]);
3080 				}
3081 			}
3082 			rcu_lock_release(&rcu_callback_map);
3083 
3084 			raw_spin_lock_irqsave(&krcp->lock, flags);
3085 			if (put_cached_bnode(krcp, bkvhead[i]))
3086 				bkvhead[i] = NULL;
3087 			raw_spin_unlock_irqrestore(&krcp->lock, flags);
3088 
3089 			if (bkvhead[i])
3090 				free_page((unsigned long) bkvhead[i]);
3091 
3092 			cond_resched_tasks_rcu_qs();
3093 		}
3094 	}
3095 
3096 	/*
3097 	 * This is used when the "bulk" path cannot be used for the
3098 	 * double-argument kvfree_rcu().  This happens when the
3099 	 * page-cache is empty, which means that objects are instead
3100 	 * queued on a linked list through their rcu_head structures.
3101 	 * This list is named "Channel 3".
3102 	 */
3103 	for (; head; head = next) {
3104 		unsigned long offset = (unsigned long)head->func;
3105 		void *ptr = (void *)head - offset;
3106 
3107 		next = head->next;
3108 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
3109 		rcu_lock_acquire(&rcu_callback_map);
3110 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3111 
3112 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3113 			kvfree(ptr);
3114 
3115 		rcu_lock_release(&rcu_callback_map);
3116 		cond_resched_tasks_rcu_qs();
3117 	}
3118 }
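
/*
 * Editor's note: a sketch of the "Channel 3" pointer encoding assumed
 * above.  The double-argument kvfree_rcu() stores the offset of the
 * rcu_head within the enclosing object in head->func, so subtracting
 * that offset from the rcu_head address recovers the object.  The
 * struct name below is illustrative only.
 */
#if 0	/* Example only, not compiled. */
struct foo {
	long data[4];
	struct rcu_head rh;	/* head->func == offsetof(struct foo, rh) */
};
/* In kfree_rcu_work(): ptr = (void *)&fp->rh - offsetof(struct foo, rh) == fp. */
#endif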
3119 
3120 static bool
3121 need_offload_krc(struct kfree_rcu_cpu *krcp)
3122 {
3123 	int i;
3124 
3125 	for (i = 0; i < FREE_N_CHANNELS; i++)
3126 		if (krcp->bkvhead[i])
3127 			return true;
3128 
3129 	return !!krcp->head;
3130 }
3131 
3132 static void
3133 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3134 {
3135 	long delay, delay_left;
3136 
3137 	delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ? 1 : KFREE_DRAIN_JIFFIES;
3138 	if (delayed_work_pending(&krcp->monitor_work)) {
3139 		delay_left = krcp->monitor_work.timer.expires - jiffies;
3140 		if (delay < delay_left)
3141 			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
3142 		return;
3143 	}
3144 	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
3145 }
3146 
3147 /*
3148  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3149  */
3150 static void kfree_rcu_monitor(struct work_struct *work)
3151 {
3152 	struct kfree_rcu_cpu *krcp = container_of(work,
3153 		struct kfree_rcu_cpu, monitor_work.work);
3154 	unsigned long flags;
3155 	int i, j;
3156 
3157 	raw_spin_lock_irqsave(&krcp->lock, flags);
3158 
3159 	// Attempt to start a new batch.
3160 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3161 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3162 
3163 		// Try to detach bkvhead or head and attach it to a
3164 		// corresponding free channel, if one is available.  A
3165 		// previous RCU batch may still be in progress, in which
3166 		// case another batch cannot be queued immediately, so
3167 		// the monitor work is rearmed instead.
3168 		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3169 			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3170 				(krcp->head && !krwp->head_free)) {
3171 			// Channel 1 corresponds to the SLAB-pointer bulk path.
3172 			// Channel 2 corresponds to vmalloc-pointer bulk path.
3173 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3174 				if (!krwp->bkvhead_free[j]) {
3175 					krwp->bkvhead_free[j] = krcp->bkvhead[j];
3176 					krcp->bkvhead[j] = NULL;
3177 				}
3178 			}
3179 
3180 			// Channel 3 corresponds to both SLAB and vmalloc
3181 			// objects queued on the linked list.
3182 			if (!krwp->head_free) {
3183 				krwp->head_free = krcp->head;
3184 				krcp->head = NULL;
3185 			}
3186 
3187 			WRITE_ONCE(krcp->count, 0);
3188 
3189 			// There is one work item per batch, and each batch
3190 			// handles up to three "free channels".  The work may
3191 			// already be pending here if the channels were
3192 			// detached one after another, in which case this
3193 			// queue_rcu_work() call is a no-op.
3194 			queue_rcu_work(system_wq, &krwp->rcu_work);
3195 		}
3196 	}
3197 
3198 	// If there is nothing left to detach, our job here is
3199 	// successfully done.  If at least one of the channels
3200 	// is still busy because previous batches are still in
3201 	// progress, rearm the work so that the detach attempt
3202 	// is repeated later.
3203 	if (need_offload_krc(krcp))
3204 		schedule_delayed_monitor_work(krcp);
3205 
3206 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3207 }
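
/*
 * Editor's note: the batching pipeline implemented by the code above,
 * in brief:
 *
 *   kvfree_rcu() -> krcp->bkvhead[0/1] or krcp->head      (accumulating)
 *   kfree_rcu_monitor() -> krwp->bkvhead_free[]/head_free (detached)
 *   queue_rcu_work() -> grace period -> kfree_rcu_work()  (freeing)
 */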
3208 
3209 static enum hrtimer_restart
3210 schedule_page_work_fn(struct hrtimer *t)
3211 {
3212 	struct kfree_rcu_cpu *krcp =
3213 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3214 
3215 	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3216 	return HRTIMER_NORESTART;
3217 }
3218 
3219 static void fill_page_cache_func(struct work_struct *work)
3220 {
3221 	struct kvfree_rcu_bulk_data *bnode;
3222 	struct kfree_rcu_cpu *krcp =
3223 		container_of(work, struct kfree_rcu_cpu,
3224 			page_cache_work.work);
3225 	unsigned long flags;
3226 	int nr_pages;
3227 	bool pushed;
3228 	int i;
3229 
3230 	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3231 		1 : rcu_min_cached_objs;
3232 
3233 	for (i = 0; i < nr_pages; i++) {
3234 		bnode = (struct kvfree_rcu_bulk_data *)
3235 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3236 
3237 		if (!bnode)
3238 			break;
3239 
3240 		raw_spin_lock_irqsave(&krcp->lock, flags);
3241 		pushed = put_cached_bnode(krcp, bnode);
3242 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3243 
3244 		if (!pushed) {
3245 			free_page((unsigned long) bnode);
3246 			break;
3247 		}
3248 	}
3249 
3250 	atomic_set(&krcp->work_in_progress, 0);
3251 	atomic_set(&krcp->backoff_page_cache_fill, 0);
3252 }
3253 
3254 static void
3255 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3256 {
3257 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3258 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3259 		if (atomic_read(&krcp->backoff_page_cache_fill)) {
3260 			queue_delayed_work(system_wq,
3261 				&krcp->page_cache_work,
3262 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3263 		} else {
3264 			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3265 			krcp->hrtimer.function = schedule_page_work_fn;
3266 			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3267 		}
3268 	}
3269 }
3270 
3271 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3272 // state specified by flags.  If can_alloc is true, the caller must
3273 // be schedulable and not be holding any locks or mutexes that might be
3274 // acquired by the memory allocator or anything that it might invoke.
3275 // Returns true if ptr was successfully recorded, else the caller must
3276 // use a fallback.
3277 static inline bool
3278 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3279 	unsigned long *flags, void *ptr, bool can_alloc)
3280 {
3281 	struct kvfree_rcu_bulk_data *bnode;
3282 	int idx;
3283 
3284 	*krcp = krc_this_cpu_lock(flags);
3285 	if (unlikely(!(*krcp)->initialized))
3286 		return false;
3287 
3288 	idx = !!is_vmalloc_addr(ptr);
3289 
3290 	/* Check if a new block is required. */
3291 	if (!(*krcp)->bkvhead[idx] ||
3292 			(*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3293 		bnode = get_cached_bnode(*krcp);
3294 		if (!bnode && can_alloc) {
3295 			krc_this_cpu_unlock(*krcp, *flags);
3296 
3297 			// __GFP_NORETRY - permits light-weight direct reclaim,
3298 			// which keeps fallback-path usage to a minimum.  It
3299 			// also forbids invoking the OOM killer, which is
3300 			// beneficial because memory is about to be released soon.
3301 			//
3302 			// __GFP_NOMEMALLOC - prevents consuming all of the
3303 			// memory reserves.  Please note we have a fallback path.
3304 			//
3305 			// __GFP_NOWARN - the allocation is expected to fail
3306 			// under low-memory or high memory-pressure scenarios,
3307 			// so do not warn when it does.
3308 			bnode = (struct kvfree_rcu_bulk_data *)
3309 				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3310 			*krcp = krc_this_cpu_lock(flags);
3311 		}
3312 
3313 		if (!bnode)
3314 			return false;
3315 
3316 		/* Initialize the new block. */
3317 		bnode->nr_records = 0;
3318 		bnode->next = (*krcp)->bkvhead[idx];
3319 
3320 		/* Attach it to the head. */
3321 		(*krcp)->bkvhead[idx] = bnode;
3322 	}
3323 
3324 	/* Finally insert. */
3325 	(*krcp)->bkvhead[idx]->records
3326 		[(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3327 
3328 	return true;
3329 }
3330 
3331 /*
3332  * Queue a request for lazy invocation of the appropriate free routine
3333  * after a grace period.  Please note that three paths are maintained,
3334  * two for the common case using arrays of pointers and a third one that
3335  * is used only when the main paths cannot be used, for example, due to
3336  * memory pressure.
3337  *
3338  * Each kvfree_call_rcu() request is added to a batch.  The batch will be
3339  * drained every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch
3340  * will be freed in workqueue context.  Batching requests together reduces
3341  * the number of grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3342  */
3343 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3344 {
3345 	unsigned long flags;
3346 	struct kfree_rcu_cpu *krcp;
3347 	bool success;
3348 	void *ptr;
3349 
3350 	if (head) {
3351 		ptr = (void *) head - (unsigned long) func;
3352 	} else {
3353 		/*
3354 		 * Please note the head-less variant has a limitation,
3355 		 * hence the clear rule for such objects: they may be
3356 		 * used only from contexts in which might_sleep() is
3357 		 * legal.  Everywhere else, please embed an rcu_head
3358 		 * in your data.
3359 		 */
3360 		might_sleep();
3361 		ptr = (unsigned long *) func;
3362 	}
3363 
3364 	// Queue the object but don't yet schedule the batch.
3365 	if (debug_rcu_head_queue(ptr)) {
3366 		// Probable double kfree_rcu(), just leak.
3367 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3368 			  __func__, head);
3369 
3370 		// Treat the object as handled and leave without queuing.
3371 		return;
3372 	}
3373 
3374 	kasan_record_aux_stack_noalloc(ptr);
3375 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3376 	if (!success) {
3377 		run_page_cache_worker(krcp);
3378 
3379 		if (head == NULL)
3380 			// kvfree_rcu(one_arg): fall back to inline freeing below.
3381 			goto unlock_return;
3382 
3383 		head->func = func;
3384 		head->next = krcp->head;
3385 		krcp->head = head;
3386 		success = true;
3387 	}
3388 
3389 	WRITE_ONCE(krcp->count, krcp->count + 1);
3390 
3391 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3392 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3393 		schedule_delayed_monitor_work(krcp);
3394 
3395 unlock_return:
3396 	krc_this_cpu_unlock(krcp, flags);
3397 
3398 	/*
3399 	 * Inline kvfree() after synchronize_rcu().  This is possible
3400 	 * only from a sleepable context, in which case the current
3401 	 * CPU can pass through a quiescent state.
3402 	 */
3403 	if (!success) {
3404 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3405 		synchronize_rcu();
3406 		kvfree(ptr);
3407 	}
3408 }
3409 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
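
/*
 * Editor's note: a minimal usage sketch for the two paths handled above.
 * The double-argument form requires an rcu_head embedded in the object
 * and never blocks; the head-less single-argument form may fall back to
 * synchronize_rcu() and so is legal only in sleepable context.  At this
 * kernel revision the single-argument form is spelled kvfree_rcu(ptr);
 * later kernels name it kvfree_rcu_mightsleep().  Names are illustrative.
 */
#if 0	/* Example only, not compiled. */
struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_free_examples(struct foo *fp, char *buf)
{
	kvfree_rcu(fp, rh);	/* Double-argument: never blocks. */
	kvfree_rcu(buf);	/* Single-argument: may block. */
}
#endif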
3410 
3411 static unsigned long
3412 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3413 {
3414 	int cpu;
3415 	unsigned long count = 0;
3416 
3417 	/* Snapshot count of all CPUs */
3418 	for_each_possible_cpu(cpu) {
3419 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3420 
3421 		count += READ_ONCE(krcp->count);
3422 		count += READ_ONCE(krcp->nr_bkv_objs);
3423 		atomic_set(&krcp->backoff_page_cache_fill, 1);
3424 	}
3425 
3426 	return count == 0 ? SHRINK_EMPTY : count;
3427 }
3428 
3429 static unsigned long
3430 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3431 {
3432 	int cpu, freed = 0;
3433 
3434 	for_each_possible_cpu(cpu) {
3435 		int count;
3436 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3437 
3438 		count = krcp->count;
3439 		count += drain_page_cache(krcp);
3440 		kfree_rcu_monitor(&krcp->monitor_work.work);
3441 
3442 		sc->nr_to_scan -= count;
3443 		freed += count;
3444 
3445 		if (sc->nr_to_scan <= 0)
3446 			break;
3447 	}
3448 
3449 	return freed == 0 ? SHRINK_STOP : freed;
3450 }
3451 
3452 static struct shrinker kfree_rcu_shrinker = {
3453 	.count_objects = kfree_rcu_shrink_count,
3454 	.scan_objects = kfree_rcu_shrink_scan,
3455 	.batch = 0,
3456 	.seeks = DEFAULT_SEEKS,
3457 };
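
/*
 * Editor's note: this shrinker hooks the kvfree_rcu() machinery into
 * memory reclaim.  Under pressure, ->count_objects() reports cached
 * pages plus queued objects, and ->scan_objects() drains the per-CPU
 * page caches and kicks the monitor to flush pending batches.  In the
 * full file the shrinker is registered via register_shrinker() from the
 * kvfree_rcu() initialization code (registration is not part of this
 * excerpt).
 */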
3458 
3459 void __init kfree_rcu_scheduler_running(void)
3460 {
3461 	int cpu;
3462 	unsigned long flags;
3463 
3464 	for_each_possible_cpu(cpu) {
3465 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3466 
3467 		raw_spin_lock_irqsave(&krcp->lock, flags);
3468 		if (need_offload_krc(krcp))
3469 			schedule_delayed_monitor_work(krcp);
3470 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3471 	}
3472 }
3473 
3474 /*
3475  * During early boot, any blocking grace-period wait automatically
3476  * implies a grace period.
3477  *
3478  * Later on, this could in theory be the case for kernels built with
3479  * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3480  * is not a common case.  Furthermore, this optimization would cause
3481  * the rcu_gp_oldstate structure to expand by 50%, so this potential
3482  * grace-period optimization is ignored once the scheduler is running.
3483  */
3484 static int rcu_blocking_is_gp(void)
3485 {
3486 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
3487 		return false;
3488 	might_sleep();  /* Check for RCU read-side critical section. */
3489 	return true;
3490 }
3491 
3492 /**
3493  * synchronize_rcu - wait until a grace period has elapsed.
3494  *
3495  * Control will return to the caller some time after a full grace
3496  * period has elapsed, in other words after all currently executing RCU
3497  * read-side critical sections have completed.  Note, however, that
3498  * upon return from synchronize_rcu(), the caller might well be executing
3499  * concurrently with new RCU read-side critical sections that began while
3500  * synchronize_rcu() was waiting.
3501  *
3502  * RCU read-side critical sections are delimited by rcu_read_lock()
3503  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3504  * v5.0 and later, regions of code across which interrupts, preemption,
3505  * or softirqs have been disabled also serve as RCU read-side critical
3506  * sections.  This includes hardware interrupt handlers, softirq handlers,
3507  * and NMI handlers.
3508  *
3509  * Note that this guarantee implies further memory-ordering guarantees.
3510  * On systems with more than one CPU, when synchronize_rcu() returns,
3511  * each CPU is guaranteed to have executed a full memory barrier since
3512  * the end of its last RCU read-side critical section whose beginning
3513  * preceded the call to synchronize_rcu().  In addition, each CPU having
3514  * an RCU read-side critical section that extends beyond the return from
3515  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3516  * after the beginning of synchronize_rcu() and before the beginning of
3517  * that RCU read-side critical section.  Note that these guarantees include
3518  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3519  * that are executing in the kernel.
3520  *
3521  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3522  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3523  * to have executed a full memory barrier during the execution of
3524  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3525  * again only if the system has more than one CPU).
3526  *
3527  * Implementation of these memory-ordering guarantees is described here:
3528  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3529  */
3530 void synchronize_rcu(void)
3531 {
3532 	unsigned long flags;
3533 	struct rcu_node *rnp;
3534 
3535 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3536 			 lock_is_held(&rcu_lock_map) ||
3537 			 lock_is_held(&rcu_sched_lock_map),
3538 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3539 	if (!rcu_blocking_is_gp()) {
3540 		if (rcu_gp_is_expedited())
3541 			synchronize_rcu_expedited();
3542 		else
3543 			wait_rcu_gp(call_rcu_hurry);
3544 		return;
3545 	}
3546 
3547 	// Context allows vacuous grace periods.
3548 	// Note well that this code runs with !PREEMPT && !SMP.
3549 	// In addition, all code that advances grace periods runs at
3550 	// process level.  Therefore, this normal GP overlaps with other
3551 	// normal GPs only by being fully nested within them, which allows
3552 	// reuse of ->gp_seq_polled_snap.
3553 	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3554 	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3555 
3556 	// Update the normal grace-period counters to record
3557 	// this grace period, but only those used by the boot CPU.
3558 	// The rcu_scheduler_starting() will take care of the rest of
3559 	// these counters.
3560 	local_irq_save(flags);
3561 	WARN_ON_ONCE(num_online_cpus() > 1);
3562 	rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3563 	for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3564 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3565 	local_irq_restore(flags);
3566 }
3567 EXPORT_SYMBOL_GPL(synchronize_rcu);
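
/*
 * Editor's note: a minimal updater-side sketch of the classic
 * publish/wait/free pattern built on synchronize_rcu().  The global
 * pointer gp and foo_mutex are illustrative assumptions.
 */
#if 0	/* Example only, not compiled. */
static DEFINE_MUTEX(foo_mutex);
static struct foo __rcu *gp;

static void foo_update(struct foo *new_fp)
{
	struct foo *old_fp;

	mutex_lock(&foo_mutex);
	old_fp = rcu_dereference_protected(gp, lockdep_is_held(&foo_mutex));
	rcu_assign_pointer(gp, new_fp);	/* Publish the new version. */
	mutex_unlock(&foo_mutex);
	synchronize_rcu();		/* Wait for pre-existing readers. */
	kfree(old_fp);			/* Now safe to free the old one. */
}
#endif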
3568 
3569 /**
3570  * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3571  * @rgosp: Place to put state cookie
3572  *
3573  * Stores into @rgosp a value that will always be treated by functions
3574  * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3575  * has already completed.
3576  */
3577 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3578 {
3579 	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3580 	rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3581 }
3582 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
3583 
3584 /**
3585  * get_state_synchronize_rcu - Snapshot current RCU state
3586  *
3587  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3588  * or poll_state_synchronize_rcu() to determine whether or not a full
3589  * grace period has elapsed in the meantime.
3590  */
3591 unsigned long get_state_synchronize_rcu(void)
3592 {
3593 	/*
3594 	 * Any prior manipulation of RCU-protected data must happen
3595 	 * before the load from ->gp_seq.
3596 	 */
3597 	smp_mb();  /* ^^^ */
3598 	return rcu_seq_snap(&rcu_state.gp_seq_polled);
3599 }
3600 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
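
/*
 * Editor's note: a sketch of the polled grace-period API in its
 * simplest form.  cond_synchronize_rcu() is defined later in this file.
 */
#if 0	/* Example only, not compiled. */
	unsigned long cookie;

	cookie = get_state_synchronize_rcu();
	/* ... unrelated work that might span a grace period ... */
	cond_synchronize_rcu(cookie);	/* Blocks only if no GP elapsed. */
#endif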
3601 
3602 /**
3603  * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3604  * @rgosp: location to place combined normal/expedited grace-period state
3605  *
3606  * Places the normal and expedited grace-period states in @rgosp.  This
3607  * state value can be passed to a later call to cond_synchronize_rcu_full()
3608  * or poll_state_synchronize_rcu_full() to determine whether or not a
3609  * grace period (whether normal or expedited) has elapsed in the meantime.
3610  * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3611  * long, but is guaranteed to see all grace periods.  In contrast, the
3612  * combined state occupies less memory, but can sometimes fail to take
3613  * grace periods into account.
3614  *
3615  * This does not guarantee that the needed grace period will actually
3616  * start.
3617  */
3618 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3619 {
3620 	struct rcu_node *rnp = rcu_get_root();
3621 
3622 	/*
3623 	 * Any prior manipulation of RCU-protected data must happen
3624 	 * before the loads from ->gp_seq and ->expedited_sequence.
3625 	 */
3626 	smp_mb();  /* ^^^ */
3627 	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3628 	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3629 }
3630 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3631 
3632 /*
3633  * Helper function for start_poll_synchronize_rcu() and
3634  * start_poll_synchronize_rcu_full().
3635  */
3636 static void start_poll_synchronize_rcu_common(void)
3637 {
3638 	unsigned long flags;
3639 	bool needwake;
3640 	struct rcu_data *rdp;
3641 	struct rcu_node *rnp;
3642 
3643 	lockdep_assert_irqs_enabled();
3644 	local_irq_save(flags);
3645 	rdp = this_cpu_ptr(&rcu_data);
3646 	rnp = rdp->mynode;
3647 	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3648 	// Note it is possible for a grace period to have elapsed between the
3649 	// caller's earlier state snapshot and the below call to
3650 	// rcu_seq_snap().  This is OK, the worst that happens is that we
3651 	// get a grace period that no one needed.  These accesses are ordered
3652 	// by smp_mb(), and we are accessing them in the opposite order
3653 	// from which they are updated at grace-period start, as required.
3654 	needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3655 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3656 	if (needwake)
3657 		rcu_gp_kthread_wake();
3658 }
3659 
3660 /**
3661  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3662  *
3663  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3664  * or poll_state_synchronize_rcu() to determine whether or not a full
3665  * grace period has elapsed in the meantime.  If the needed grace period
3666  * is not already slated to start, notifies RCU core of the need for that
3667  * grace period.
3668  *
3669  * Interrupts must be enabled for the case where it is necessary to awaken
3670  * the grace-period kthread.
3671  */
3672 unsigned long start_poll_synchronize_rcu(void)
3673 {
3674 	unsigned long gp_seq = get_state_synchronize_rcu();
3675 
3676 	start_poll_synchronize_rcu_common();
3677 	return gp_seq;
3678 }
3679 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
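
/*
 * Editor's note: a sketch of fully non-blocking use, pairing
 * start_poll_synchronize_rcu() with repeated polling instead of
 * sleeping in synchronize_rcu().
 */
#if 0	/* Example only, not compiled. */
	unsigned long cookie;

	cookie = start_poll_synchronize_rcu();	/* Also starts a GP if needed. */
	while (!poll_state_synchronize_rcu(cookie))
		schedule_timeout_uninterruptible(1);	/* Or do other work. */
#endif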
3680 
3681 /**
3682  * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3683  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3684  *
3685  * Places the normal and expedited grace-period states in @rgosp.  This
3686  * state value can be passed to a later call to cond_synchronize_rcu_full()
3687  * or poll_state_synchronize_rcu_full() to determine whether or not a
3688  * grace period (whether normal or expedited) has elapsed in the meantime.
3689  * If the needed grace period is not already slated to start, notifies
3690  * RCU core of the need for that grace period.
3691  *
3692  * Interrupts must be enabled for the case where it is necessary to awaken
3693  * the grace-period kthread.
3694  */
3695 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3696 {
3697 	get_state_synchronize_rcu_full(rgosp);
3698 
3699 	start_poll_synchronize_rcu_common();
3700 }
3701 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3702 
3703 /**
3704  * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3705  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3706  *
3707  * If a full RCU grace period has elapsed since the earlier call from
3708  * which @oldstate was obtained, return @true, otherwise return @false.
3709  * If @false is returned, it is the caller's responsibility to invoke this
3710  * function later on until it does return @true.  Alternatively, the caller
3711  * can explicitly wait for a grace period, for example, by passing @oldstate
3712  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3713  *
3714  * Yes, this function does not take counter wrap into account.
3715  * But counter wrap is harmless.  If the counter wraps, we have waited for
3716  * more than a billion grace periods (and way more on a 64-bit system!).
3717  * Those needing to keep old state values for very long time periods
3718  * (many hours even on 32-bit systems) should check them occasionally and
3719  * either refresh them or set a flag indicating that the grace period has
3720  * completed.  Alternatively, they can use get_completed_synchronize_rcu()
3721  * to get a guaranteed-completed grace-period state.
3722  *
3723  * This function provides the same memory-ordering guarantees that
3724  * would be provided by a synchronize_rcu() that was invoked at the call
3725  * to the function that provided @oldstate, and that returned at the end
3726  * of this function.
3727  */
3728 bool poll_state_synchronize_rcu(unsigned long oldstate)
3729 {
3730 	if (oldstate == RCU_GET_STATE_COMPLETED ||
3731 	    rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3732 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3733 		return true;
3734 	}
3735 	return false;
3736 }
3737 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3738 
3739 /**
3740  * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3741  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3742  *
3743  * If a full RCU grace period has elapsed since the earlier call from
3744  * which @rgosp was obtained, return @true, otherwise return @false.
3745  * If @false is returned, it is the caller's responsibility to invoke this
3746  * function later on until it does return @true.  Alternatively, the caller
3747  * can explicitly wait for a grace period, for example, by passing @rgosp
3748  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3749  *
3750  * Yes, this function does not take counter wrap into account.
3751  * But counter wrap is harmless.  If the counter wraps, we have waited
3752  * for more than a billion grace periods (and way more on a 64-bit
3753  * system!).  Those needing to keep rcu_gp_oldstate values for very
3754  * long time periods (many hours even on 32-bit systems) should check
3755  * them occasionally and either refresh them or set a flag indicating
3756  * that the grace period has completed.  Alternatively, they can use
3757  * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3758  * grace-period state.
3759  *
3760  * This function provides the same memory-ordering guarantees that would
3761  * be provided by a synchronize_rcu() that was invoked at the call to
3762  * the function that provided @rgosp, and that returned at the end of this
3763  * function.  And this guarantee requires that the root rcu_node structure's
3764  * ->gp_seq field be checked instead of that of the rcu_state structure.
3765  * The problem is that the just-ending grace-period's callbacks can be
3766  * invoked between the time that the root rcu_node structure's ->gp_seq
3767  * field is updated and the time that the rcu_state structure's ->gp_seq
3768  * field is updated.  Therefore, if a single synchronize_rcu() is to
3769  * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3770  * then the root rcu_node structure is the one that needs to be polled.
3771  */
3772 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3773 {
3774 	struct rcu_node *rnp = rcu_get_root();
3775 
3776 	smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3777 	if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3778 	    rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3779 	    rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3780 	    rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3781 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3782 		return true;
3783 	}
3784 	return false;
3785 }
3786 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
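
/*
 * Editor's note: the same polling pattern using the full-state variant,
 * which trades twice the cookie storage for immunity to counter wrap.
 */
#if 0	/* Example only, not compiled. */
	struct rcu_gp_oldstate rgos;

	get_state_synchronize_rcu_full(&rgos);
	/* ... time passes, updates happen ... */
	if (poll_state_synchronize_rcu_full(&rgos))
		;	/* A full grace period has elapsed since the snapshot. */
#endif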
3787 
3788 /**
3789  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3790  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3791  *
3792  * If a full RCU grace period has elapsed since the earlier call to
3793  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3794  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3795  *
3796  * Yes, this function does not take counter wrap into account.
3797  * But counter wrap is harmless.  If the counter wraps, we have waited for
3798  * more than 2 billion grace periods (and way more on a 64-bit system!),
3799  * so waiting for a couple of additional grace periods should be just fine.
3800  *
3801  * This function provides the same memory-ordering guarantees that
3802  * would be provided by a synchronize_rcu() that was invoked at the call
3803  * to the function that provided @oldstate and that returned at the end
3804  * of this function.
3805  */
3806 void cond_synchronize_rcu(unsigned long oldstate)
3807 {
3808 	if (!poll_state_synchronize_rcu(oldstate))
3809 		synchronize_rcu();
3810 }
3811 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3812 
3813 /**
3814  * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3815  * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3816  *
3817  * If a full RCU grace period has elapsed since the call to
3818  * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3819  * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3820  * obtained, just return.  Otherwise, invoke synchronize_rcu() to wait
3821  * for a full grace period.
3822  *
3823  * Yes, this function does not take counter wrap into account.
3824  * But counter wrap is harmless.  If the counter wraps, we have waited for
3825  * more than 2 billion grace periods (and way more on a 64-bit system!),
3826  * so waiting for a couple of additional grace periods should be just fine.
3827  *
3828  * This function provides the same memory-ordering guarantees that
3829  * would be provided by a synchronize_rcu() that was invoked at the call
3830  * to the function that provided @rgosp and that returned at the end of
3831  * this function.
3832  */
3833 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3834 {
3835 	if (!poll_state_synchronize_rcu_full(rgosp))
3836 		synchronize_rcu();
3837 }
3838 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3839 
3840 /*
3841  * Check to see if there is any immediate RCU-related work to be done by
3842  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3843  * in order of increasing expense: checks that can be carried out against
3844  * CPU-local state are performed first.  However, we must check for CPU
3845  * stalls first, else we might not get a chance.
3846  */
3847 static int rcu_pending(int user)
3848 {
3849 	bool gp_in_progress;
3850 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3851 	struct rcu_node *rnp = rdp->mynode;
3852 
3853 	lockdep_assert_irqs_disabled();
3854 
3855 	/* Check for CPU stalls, if enabled. */
3856 	check_cpu_stall(rdp);
3857 
3858 	/* Does this CPU need a deferred NOCB wakeup? */
3859 	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3860 		return 1;
3861 
3862 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3863 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3864 		return 0;
3865 
3866 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3867 	gp_in_progress = rcu_gp_in_progress();
3868 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3869 		return 1;
3870 
3871 	/* Does this CPU have callbacks ready to invoke? */
3872 	if (!rcu_rdp_is_offloaded(rdp) &&
3873 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3874 		return 1;
3875 
3876 	/* Has RCU gone idle with this CPU needing another grace period? */
3877 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3878 	    !rcu_rdp_is_offloaded(rdp) &&
3879 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3880 		return 1;
3881 
3882 	/* Have RCU grace period completed or started?  */
3883 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3884 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3885 		return 1;
3886 
3887 	/* nothing to do */
3888 	return 0;
3889 }
3890 
3891 /*
3892  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3893  * the compiler is expected to optimize this away.
3894  */
3895 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3896 {
3897 	trace_rcu_barrier(rcu_state.name, s, cpu,
3898 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3899 }
3900 
3901 /*
3902  * RCU callback function for rcu_barrier().  If we are last, wake
3903  * up the task executing rcu_barrier().
3904  *
3905  * Note that the value of rcu_state.barrier_sequence must be captured
3906  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3907  * other CPUs might count the value down to zero before this CPU gets
3908  * around to invoking rcu_barrier_trace(), which might result in bogus
3909  * data from the next instance of rcu_barrier().
3910  */
3911 static void rcu_barrier_callback(struct rcu_head *rhp)
3912 {
3913 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3914 
3915 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3916 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3917 		complete(&rcu_state.barrier_completion);
3918 	} else {
3919 		rcu_barrier_trace(TPS("CB"), -1, s);
3920 	}
3921 }
3922 
3923 /*
3924  * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3925  */
3926 static void rcu_barrier_entrain(struct rcu_data *rdp)
3927 {
3928 	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3929 	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3930 	bool wake_nocb = false;
3931 	bool was_alldone = false;
3932 
3933 	lockdep_assert_held(&rcu_state.barrier_lock);
3934 	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3935 		return;
3936 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3937 	rdp->barrier_head.func = rcu_barrier_callback;
3938 	debug_rcu_head_queue(&rdp->barrier_head);
3939 	rcu_nocb_lock(rdp);
3940 	/*
3941 	 * Flush the bypass list and wake up the rcuog kthread if callbacks are
3942 	 * added to an empty regular queue.  This way we don't wait for the bypass
3943 	 * timer, which can reach seconds if the queue is fully lazy.
3944 	 */
3945 	was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3946 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3947 	wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3948 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3949 		atomic_inc(&rcu_state.barrier_cpu_count);
3950 	} else {
3951 		debug_rcu_head_unqueue(&rdp->barrier_head);
3952 		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3953 	}
3954 	rcu_nocb_unlock(rdp);
3955 	if (wake_nocb)
3956 		wake_nocb_gp(rdp, false);
3957 	smp_store_release(&rdp->barrier_seq_snap, gseq);
3958 }
3959 
3960 /*
3961  * Called with preemption disabled, and from cross-cpu IRQ context.
3962  */
3963 static void rcu_barrier_handler(void *cpu_in)
3964 {
3965 	uintptr_t cpu = (uintptr_t)cpu_in;
3966 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3967 
3968 	lockdep_assert_irqs_disabled();
3969 	WARN_ON_ONCE(cpu != rdp->cpu);
3970 	WARN_ON_ONCE(cpu != smp_processor_id());
3971 	raw_spin_lock(&rcu_state.barrier_lock);
3972 	rcu_barrier_entrain(rdp);
3973 	raw_spin_unlock(&rcu_state.barrier_lock);
3974 }
3975 
3976 /**
3977  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3978  *
3979  * Note that this primitive does not necessarily wait for an RCU grace period
3980  * to complete.  For example, if there are no RCU callbacks queued anywhere
3981  * in the system, then rcu_barrier() is within its rights to return
3982  * immediately, without waiting for anything, much less an RCU grace period.
3983  */
3984 void rcu_barrier(void)
3985 {
3986 	uintptr_t cpu;
3987 	unsigned long flags;
3988 	unsigned long gseq;
3989 	struct rcu_data *rdp;
3990 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3991 
3992 	rcu_barrier_trace(TPS("Begin"), -1, s);
3993 
3994 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3995 	mutex_lock(&rcu_state.barrier_mutex);
3996 
3997 	/* Did someone else do our work for us? */
3998 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3999 		rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4000 		smp_mb(); /* caller's subsequent code after above check. */
4001 		mutex_unlock(&rcu_state.barrier_mutex);
4002 		return;
4003 	}
4004 
4005 	/* Mark the start of the barrier operation. */
4006 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4007 	rcu_seq_start(&rcu_state.barrier_sequence);
4008 	gseq = rcu_state.barrier_sequence;
4009 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4010 
4011 	/*
4012 	 * Initialize the count to two rather than to zero in order
4013 	 * to avoid a too-soon return to zero in case of an immediate
4014 	 * invocation of the just-enqueued callback (or preemption of
4015 	 * this task).  Exclude CPU-hotplug operations to ensure that no
4016 	 * offline non-offloaded CPU has callbacks queued.
4017 	 */
4018 	init_completion(&rcu_state.barrier_completion);
4019 	atomic_set(&rcu_state.barrier_cpu_count, 2);
4020 	raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4021 
4022 	/*
4023 	 * Force each CPU with callbacks to register a new callback.
4024 	 * When that callback is invoked, we will know that all of the
4025 	 * corresponding CPU's preceding callbacks have been invoked.
4026 	 */
4027 	for_each_possible_cpu(cpu) {
4028 		rdp = per_cpu_ptr(&rcu_data, cpu);
4029 retry:
4030 		if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4031 			continue;
4032 		raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4033 		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4034 			WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4035 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4036 			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4037 			continue;
4038 		}
4039 		if (!rcu_rdp_cpu_online(rdp)) {
4040 			rcu_barrier_entrain(rdp);
4041 			WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4042 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4043 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4044 			continue;
4045 		}
4046 		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4047 		if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4048 			schedule_timeout_uninterruptible(1);
4049 			goto retry;
4050 		}
4051 		WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4052 		rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4053 	}
4054 
4055 	/*
4056 	 * Now that we have an rcu_barrier_callback() callback on each CPU,
4057 	 * each of which has been counted, remove the initial count.
4058 	 */
4059 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4060 		complete(&rcu_state.barrier_completion);
4061 
4062 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4063 	wait_for_completion(&rcu_state.barrier_completion);
4064 
4065 	/* Mark the end of the barrier operation. */
4066 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4067 	rcu_seq_end(&rcu_state.barrier_sequence);
4068 	gseq = rcu_state.barrier_sequence;
4069 	for_each_possible_cpu(cpu) {
4070 		rdp = per_cpu_ptr(&rcu_data, cpu);
4071 
4072 		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4073 	}
4074 
4075 	/* Other rcu_barrier() invocations can now safely proceed. */
4076 	mutex_unlock(&rcu_state.barrier_mutex);
4077 }
4078 EXPORT_SYMBOL_GPL(rcu_barrier);
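
/*
 * Editor's note: the canonical rcu_barrier() use case, sketched for a
 * hypothetical module that posts call_rcu() callbacks into a slab cache.
 */
#if 0	/* Example only, not compiled. */
static void __exit foo_exit(void)
{
	foo_stop_posting_callbacks();	/* No new call_rcu() invocations. */
	rcu_barrier();			/* Wait for in-flight callbacks. */
	kmem_cache_destroy(foo_cache);	/* Now safe to tear down. */
}
#endif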
4079 
4080 /*
4081  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4082  * first CPU in a given leaf rcu_node structure coming online.  The caller
4083  * must hold the corresponding leaf rcu_node ->lock with interrupts
4084  * disabled.
4085  */
4086 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4087 {
4088 	long mask;
4089 	long oldmask;
4090 	struct rcu_node *rnp = rnp_leaf;
4091 
4092 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4093 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4094 	for (;;) {
4095 		mask = rnp->grpmask;
4096 		rnp = rnp->parent;
4097 		if (rnp == NULL)
4098 			return;
4099 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4100 		oldmask = rnp->qsmaskinit;
4101 		rnp->qsmaskinit |= mask;
4102 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4103 		if (oldmask)
4104 			return;
4105 	}
4106 }
4107 
4108 /*
4109  * Do boot-time initialization of a CPU's per-CPU RCU data.
4110  */
4111 static void __init
4112 rcu_boot_init_percpu_data(int cpu)
4113 {
4114 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4115 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4116 
4117 	/* Set up local state, ensuring consistent view of global state. */
4118 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4119 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4120 	WARN_ON_ONCE(ct->dynticks_nesting != 1);
4121 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4122 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4123 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4124 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4125 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4126 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4127 	rdp->last_sched_clock = jiffies;
4128 	rdp->cpu = cpu;
4129 	rcu_boot_init_nocb_percpu_data(rdp);
4130 }
4131 
4132 /*
4133  * Invoked early in the CPU-online process, when pretty much all services
4134  * are available.  The incoming CPU is not present.
4135  *
4136  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4137  * offline event can be happening at a given time.  Note also that we can
4138  * accept some slop in the rcu_state.gp_seq access due to the fact that this
4139  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4140  * And any offloaded callbacks are being numbered elsewhere.
4141  */
4142 int rcutree_prepare_cpu(unsigned int cpu)
4143 {
4144 	unsigned long flags;
4145 	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4146 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4147 	struct rcu_node *rnp = rcu_get_root();
4148 
4149 	/* Set up local state, ensuring consistent view of global state. */
4150 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4151 	rdp->qlen_last_fqs_check = 0;
4152 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4153 	rdp->blimit = blimit;
4154 	ct->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4155 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4156 
4157 	/*
4158 	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4159 	 * (re-)initialized.
4160 	 */
4161 	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4162 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4163 
4164 	/*
4165 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4166 	 * propagation up the rcu_node tree will happen at the beginning
4167 	 * of the next grace period.
4168 	 */
4169 	rnp = rdp->mynode;
4170 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4171 	rdp->beenonline = true;	 /* We have now been online. */
4172 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4173 	rdp->gp_seq_needed = rdp->gp_seq;
4174 	rdp->cpu_no_qs.b.norm = true;
4175 	rdp->core_needs_qs = false;
4176 	rdp->rcu_iw_pending = false;
4177 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4178 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4179 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4180 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4181 	rcu_spawn_one_boost_kthread(rnp);
4182 	rcu_spawn_cpu_nocb_kthread(cpu);
4183 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4184 
4185 	return 0;
4186 }
4187 
4188 /*
4189  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4190  */
4191 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4192 {
4193 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4194 
4195 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4196 }
4197 
4198 /*
4199  * Near the end of the CPU-online process.  Pretty much all services
4200  * enabled, and the CPU is now very much alive.
4201  */
4202 int rcutree_online_cpu(unsigned int cpu)
4203 {
4204 	unsigned long flags;
4205 	struct rcu_data *rdp;
4206 	struct rcu_node *rnp;
4207 
4208 	rdp = per_cpu_ptr(&rcu_data, cpu);
4209 	rnp = rdp->mynode;
4210 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4211 	rnp->ffmask |= rdp->grpmask;
4212 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4213 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4214 		return 0; /* Too early in boot for scheduler work. */
4215 	sync_sched_exp_online_cleanup(cpu);
4216 	rcutree_affinity_setting(cpu, -1);
4217 
4218 	// Stop-machine done, so allow nohz_full to disable tick.
4219 	tick_dep_clear(TICK_DEP_BIT_RCU);
4220 	return 0;
4221 }
4222 
4223 /*
4224  * Near the beginning of the process.  The CPU is still very much alive
4225  * with pretty much all services enabled.
4226  */
4227 int rcutree_offline_cpu(unsigned int cpu)
4228 {
4229 	unsigned long flags;
4230 	struct rcu_data *rdp;
4231 	struct rcu_node *rnp;
4232 
4233 	rdp = per_cpu_ptr(&rcu_data, cpu);
4234 	rnp = rdp->mynode;
4235 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4236 	rnp->ffmask &= ~rdp->grpmask;
4237 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4238 
4239 	rcutree_affinity_setting(cpu, cpu);
4240 
4241 	// nohz_full CPUs need the tick for stop-machine to work quickly
4242 	tick_dep_set(TICK_DEP_BIT_RCU);
4243 	return 0;
4244 }
4245 
4246 /*
4247  * Mark the specified CPU as being online so that subsequent grace periods
4248  * (both expedited and normal) will wait on it.  Note that this means that
4249  * incoming CPUs are not allowed to use RCU read-side critical sections
4250  * until this function is called.  Failing to observe this restriction
4251  * will result in lockdep splats.
4252  *
4253  * Note that this function is special in that it is invoked directly
4254  * from the incoming CPU rather than from the cpuhp_step mechanism.
4255  * This is because this function must be invoked at a precise location.
4256  */
4257 void rcu_cpu_starting(unsigned int cpu)
4258 {
4259 	unsigned long flags;
4260 	unsigned long mask;
4261 	struct rcu_data *rdp;
4262 	struct rcu_node *rnp;
4263 	bool newcpu;
4264 
4265 	rdp = per_cpu_ptr(&rcu_data, cpu);
4266 	if (rdp->cpu_started)
4267 		return;
4268 	rdp->cpu_started = true;
4269 
4270 	rnp = rdp->mynode;
4271 	mask = rdp->grpmask;
4272 	local_irq_save(flags);
4273 	arch_spin_lock(&rcu_state.ofl_lock);
4274 	rcu_dynticks_eqs_online();
4275 	raw_spin_lock(&rcu_state.barrier_lock);
4276 	raw_spin_lock_rcu_node(rnp);
4277 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4278 	raw_spin_unlock(&rcu_state.barrier_lock);
4279 	newcpu = !(rnp->expmaskinitnext & mask);
4280 	rnp->expmaskinitnext |= mask;
4281 	/* Allow lockless access for expedited grace periods. */
4282 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4283 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4284 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4285 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4286 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4287 
4288 	/* An incoming CPU should never be blocking a grace period. */
4289 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4290 		/* rcu_report_qs_rnp() *really* wants some flags to restore */
4291 		unsigned long flags2;
4292 
4293 		local_irq_save(flags2);
4294 		rcu_disable_urgency_upon_qs(rdp);
4295 		/* Report QS -after- changing ->qsmaskinitnext! */
4296 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
4297 	} else {
4298 		raw_spin_unlock_rcu_node(rnp);
4299 	}
4300 	arch_spin_unlock(&rcu_state.ofl_lock);
4301 	local_irq_restore(flags);
4302 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4303 }
4304 
4305 /*
4306  * The outgoing CPU has no further need of RCU, so remove it from
4307  * the rcu_node tree's ->qsmaskinitnext bit masks.
4308  *
4309  * Note that this function is special in that it is invoked directly
4310  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4311  * This is because this function must be invoked at a precise location.
4312  */
4313 void rcu_report_dead(unsigned int cpu)
4314 {
4315 	unsigned long flags, seq_flags;
4316 	unsigned long mask;
4317 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4318 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4319 
4320 	// Do any dangling deferred wakeups.
4321 	do_nocb_deferred_wakeup(rdp);
4322 
4323 	rcu_preempt_deferred_qs(current);
4324 
4325 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4326 	mask = rdp->grpmask;
4327 	local_irq_save(seq_flags);
4328 	arch_spin_lock(&rcu_state.ofl_lock);
4329 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4330 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4331 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4332 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4333 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4334 		rcu_disable_urgency_upon_qs(rdp);
4335 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4336 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4337 	}
4338 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4339 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4340 	arch_spin_unlock(&rcu_state.ofl_lock);
4341 	local_irq_restore(seq_flags);
4342 
4343 	rdp->cpu_started = false;
4344 }
4345 
4346 #ifdef CONFIG_HOTPLUG_CPU
4347 /*
4348  * The outgoing CPU has just passed through the dying-idle state, and we
4349  * are being invoked from the CPU that was IPIed to continue the offline
4350  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4351  */
4352 void rcutree_migrate_callbacks(int cpu)
4353 {
4354 	unsigned long flags;
4355 	struct rcu_data *my_rdp;
4356 	struct rcu_node *my_rnp;
4357 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4358 	bool needwake;
4359 
4360 	if (rcu_rdp_is_offloaded(rdp) ||
4361 	    rcu_segcblist_empty(&rdp->cblist))
4362 		return;  /* No callbacks to migrate. */
4363 
4364 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4365 	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4366 	rcu_barrier_entrain(rdp);
4367 	my_rdp = this_cpu_ptr(&rcu_data);
4368 	my_rnp = my_rdp->mynode;
4369 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4370 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4371 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4372 	/* Leverage recent GPs and set GP for new callbacks. */
4373 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4374 		   rcu_advance_cbs(my_rnp, my_rdp);
4375 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4376 	raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4377 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4378 	rcu_segcblist_disable(&rdp->cblist);
4379 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4380 	check_cb_ovld_locked(my_rdp, my_rnp);
4381 	if (rcu_rdp_is_offloaded(my_rdp)) {
4382 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4383 		__call_rcu_nocb_wake(my_rdp, true, flags);
4384 	} else {
4385 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4386 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4387 	}
4388 	if (needwake)
4389 		rcu_gp_kthread_wake();
4390 	lockdep_assert_irqs_enabled();
4391 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4392 		  !rcu_segcblist_empty(&rdp->cblist),
4393 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4394 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4395 		  rcu_segcblist_first_cb(&rdp->cblist));
4396 }
4397 #endif /* CONFIG_HOTPLUG_CPU */
4398 
4399 /*
4400  * On non-huge systems, use expedited RCU grace periods to make suspend
4401  * and hibernation run faster.
4402  */
4403 static int rcu_pm_notify(struct notifier_block *self,
4404 			 unsigned long action, void *hcpu)
4405 {
4406 	switch (action) {
4407 	case PM_HIBERNATION_PREPARE:
4408 	case PM_SUSPEND_PREPARE:
4409 		rcu_expedite_gp();
4410 		break;
4411 	case PM_POST_HIBERNATION:
4412 	case PM_POST_SUSPEND:
4413 		rcu_unexpedite_gp();
4414 		break;
4415 	default:
4416 		break;
4417 	}
4418 	return NOTIFY_OK;
4419 }
4420 
4421 #ifdef CONFIG_RCU_EXP_KTHREAD
4422 struct kthread_worker *rcu_exp_gp_kworker;
4423 struct kthread_worker *rcu_exp_par_gp_kworker;
4424 
4425 static void __init rcu_start_exp_gp_kworkers(void)
4426 {
4427 	const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4428 	const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4429 	struct sched_param param = { .sched_priority = kthread_prio };
4430 
4431 	rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4432 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4433 		pr_err("Failed to create %s!\n", gp_kworker_name);
4434 		return;
4435 	}
4436 
4437 	rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4438 	if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4439 		pr_err("Failed to create %s!\n", par_gp_kworker_name);
4440 		kthread_destroy_worker(rcu_exp_gp_kworker);
4441 		return;
4442 	}
4443 
4444 	sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4445 	sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4446 				   &param);
4447 }
4448 
4449 static inline void rcu_alloc_par_gp_wq(void)
4450 {
4451 }
4452 #else /* !CONFIG_RCU_EXP_KTHREAD */
4453 struct workqueue_struct *rcu_par_gp_wq;
4454 
4455 static void __init rcu_start_exp_gp_kworkers(void)
4456 {
4457 }
4458 
4459 static inline void rcu_alloc_par_gp_wq(void)
4460 {
4461 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4462 	WARN_ON(!rcu_par_gp_wq);
4463 }
4464 #endif /* CONFIG_RCU_EXP_KTHREAD */
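
/*
 * Editorial sketch: work is handed to the workers above through the
 * standard kthread_work API.  Illustrative only; the names below are
 * hypothetical, and the real expedited grace-period code wraps this
 * machinery differently:
 *
 *	static void example_exp_work_fn(struct kthread_work *work)
 *	{
 *		// Runs in rcu_exp_gp_kworker's SCHED_FIFO kthread.
 *	}
 *
 *	static DEFINE_KTHREAD_WORK(example_exp_work, example_exp_work_fn);
 *
 *	static void example_queue_exp_work(void)
 *	{
 *		if (!IS_ERR_OR_NULL(rcu_exp_gp_kworker))
 *			kthread_queue_work(rcu_exp_gp_kworker, &example_exp_work);
 *	}
 */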
4465 
4466 /*
4467  * Spawn the kthreads that handle RCU's grace periods.
4468  */
4469 static int __init rcu_spawn_gp_kthread(void)
4470 {
4471 	unsigned long flags;
4472 	struct rcu_node *rnp;
4473 	struct sched_param sp;
4474 	struct task_struct *t;
4475 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4476 
4477 	rcu_scheduler_fully_active = 1;
4478 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4479 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4480 		return 0;
4481 	if (kthread_prio) {
4482 		sp.sched_priority = kthread_prio;
4483 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4484 	}
4485 	rnp = rcu_get_root();
4486 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4487 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4488 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4489 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4490 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4491 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4492 	wake_up_process(t);
4493 	/* This is a pre-SMP initcall, so only a single CPU should be online. */
4494 	WARN_ON(num_online_cpus() > 1);
4495 	/*
4496 	 * These kthreads could not be created earlier, during rcu_init() ->
4497 	 * rcutree_prepare_cpu(), because rcu_scheduler_fully_active was not yet set.
4498 	 */
4499 	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4500 	rcu_spawn_one_boost_kthread(rdp->mynode);
4501 	rcu_spawn_core_kthreads();
4502 	/* Create kthread worker for expedited GPs */
4503 	rcu_start_exp_gp_kworkers();
4504 	return 0;
4505 }
4506 early_initcall(rcu_spawn_gp_kthread);
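
/*
 * Editorial note: the smp_store_release() of ->gp_kthread above pairs
 * with an acquire load on the reader side, guaranteeing that anyone who
 * sees a non-NULL kthread pointer also sees the freshly reset
 * ->gp_activity and ->gp_req_activity timestamps.  A hypothetical
 * reader-side sketch (the real consumers live in the stall-warning
 * code):
 *
 *	struct task_struct *t = smp_load_acquire(&rcu_state.gp_kthread);
 *
 *	if (t) {
 *		// Safe: these timestamps cannot be stale pre-spawn values,
 *		// so they cannot trigger spurious stall reports.
 *		inspect(READ_ONCE(rcu_state.gp_activity));	// hypothetical
 *		inspect(READ_ONCE(rcu_state.gp_req_activity));	// hypothetical
 *	}
 */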
4507 
4508 /*
4509  * This function is invoked towards the end of the scheduler's
4510  * initialization process.  Before this is called, the idle task might
4511  * invoke synchronous grace-period primitives (while the idle task is still
4512  * booting the system, such primitives are no-ops).  After this
4513  * function is called, any synchronous grace-period primitives are run as
4514  * expedited, with the requesting task driving the grace period forward.
4515  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4516  * runtime RCU functionality.
4517  */
4518 void rcu_scheduler_starting(void)
4519 {
4520 	unsigned long flags;
4521 	struct rcu_node *rnp;
4522 
4523 	WARN_ON(num_online_cpus() != 1);
4524 	WARN_ON(nr_context_switches() > 0);
4525 	rcu_test_sync_prims();
4526 
4527 	// Fix up the ->gp_seq counters.
4528 	local_irq_save(flags);
4529 	rcu_for_each_node_breadth_first(rnp)
4530 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4531 	local_irq_restore(flags);
4532 
4533 	// Switch out of early boot mode.
4534 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4535 	rcu_test_sync_prims();
4536 }
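
/*
 * Editorial note: ->gp_seq is a sequence counter whose bottom
 * RCU_SEQ_CTR_SHIFT bits encode grace-period state, with the count
 * itself in the upper bits.  Roughly, per kernel/rcu/rcu.h:
 *
 *	#define RCU_SEQ_CTR_SHIFT	2
 *	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)
 *
 *	// Number of grace periods begun/completed.
 *	static inline unsigned long rcu_seq_ctr(unsigned long s)
 *	{
 *		return s >> RCU_SEQ_CTR_SHIFT;
 *	}
 *
 *	// Nonzero while a grace period is in progress.
 *	static inline int rcu_seq_state(unsigned long s)
 *	{
 *		return s & RCU_SEQ_STATE_MASK;
 *	}
 *
 * The loop above simply copies rcu_state.gp_seq into every rcu_node so
 * that all levels of the tree agree before the first real grace period.
 */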
4537 
4538 /*
4539  * Helper function for rcu_init() that initializes the rcu_state structure.
4540  */
4541 static void __init rcu_init_one(void)
4542 {
4543 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4544 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4545 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4546 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4547 
4548 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4549 	int cpustride = 1;
4550 	int i;
4551 	int j;
4552 	struct rcu_node *rnp;
4553 
4554 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4555 
4556 	/* Silence gcc 4.8 false positive about array index out of range. */
4557 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4558 		panic("rcu_init_one: rcu_num_lvls out of range");
4559 
4560 	/* Initialize the level-tracking arrays. */
4561 
4562 	for (i = 1; i < rcu_num_lvls; i++)
4563 		rcu_state.level[i] =
4564 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4565 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4566 
4567 	/* Initialize the elements themselves, starting from the leaves. */
4568 
4569 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4570 		cpustride *= levelspread[i];
4571 		rnp = rcu_state.level[i];
4572 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4573 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4574 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4575 						   &rcu_node_class[i], buf[i]);
4576 			raw_spin_lock_init(&rnp->fqslock);
4577 			lockdep_set_class_and_name(&rnp->fqslock,
4578 						   &rcu_fqs_class[i], fqs[i]);
4579 			rnp->gp_seq = rcu_state.gp_seq;
4580 			rnp->gp_seq_needed = rcu_state.gp_seq;
4581 			rnp->completedqs = rcu_state.gp_seq;
4582 			rnp->qsmask = 0;
4583 			rnp->qsmaskinit = 0;
4584 			rnp->grplo = j * cpustride;
4585 			rnp->grphi = (j + 1) * cpustride - 1;
4586 			if (rnp->grphi >= nr_cpu_ids)
4587 				rnp->grphi = nr_cpu_ids - 1;
4588 			if (i == 0) {
4589 				rnp->grpnum = 0;
4590 				rnp->grpmask = 0;
4591 				rnp->parent = NULL;
4592 			} else {
4593 				rnp->grpnum = j % levelspread[i - 1];
4594 				rnp->grpmask = BIT(rnp->grpnum);
4595 				rnp->parent = rcu_state.level[i - 1] +
4596 					      j / levelspread[i - 1];
4597 			}
4598 			rnp->level = i;
4599 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4600 			rcu_init_one_nocb(rnp);
4601 			init_waitqueue_head(&rnp->exp_wq[0]);
4602 			init_waitqueue_head(&rnp->exp_wq[1]);
4603 			init_waitqueue_head(&rnp->exp_wq[2]);
4604 			init_waitqueue_head(&rnp->exp_wq[3]);
4605 			spin_lock_init(&rnp->exp_lock);
4606 			mutex_init(&rnp->boost_kthread_mutex);
4607 			raw_spin_lock_init(&rnp->exp_poll_lock);
4608 			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4609 			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4610 		}
4611 	}
4612 
4613 	init_swait_queue_head(&rcu_state.gp_wq);
4614 	init_swait_queue_head(&rcu_state.expedited_wq);
4615 	rnp = rcu_first_leaf_node();
4616 	for_each_possible_cpu(i) {
4617 		while (i > rnp->grphi)
4618 			rnp++;
4619 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4620 		rcu_boot_init_percpu_data(i);
4621 	}
4622 }
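
/*
 * Editorial example: with a two-level tree, rcu_fanout_leaf = 16, and
 * nr_cpu_ids = 40, the final loop above would assign CPUs to leaves as
 * follows (derived from the ->grplo/->grphi ranges computed earlier):
 *
 *	leaf 0: CPUs  0..15
 *	leaf 1: CPUs 16..31
 *	leaf 2: CPUs 32..39	(->grphi clamped to nr_cpu_ids - 1)
 *
 * Each CPU's rcu_data.mynode then points at its leaf, the one rcu_node
 * through which that CPU reports its quiescent states.
 */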
4623 
4624 /*
4625  * Force priority from the kernel command-line into range.
4626  */
4627 static void __init sanitize_kthread_prio(void)
4628 {
4629 	int kthread_prio_in = kthread_prio;
4630 
4631 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4632 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4633 		kthread_prio = 2;
4634 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4635 		kthread_prio = 1;
4636 	else if (kthread_prio < 0)
4637 		kthread_prio = 0;
4638 	else if (kthread_prio > 99)
4639 		kthread_prio = 99;
4640 
4641 	if (kthread_prio != kthread_prio_in)
4642 		pr_alert("%s: Limited prio to %d from %d\n",
4643 			 __func__, kthread_prio, kthread_prio_in);
4644 }
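
/*
 * Editorial sketch: the cascade above amounts to clamping kthread_prio
 * to [0, 99] with a Kconfig-dependent floor.  An equivalent compact
 * restatement (illustrative only):
 *
 *	int floor = 0;
 *
 *	if (IS_ENABLED(CONFIG_RCU_BOOST))
 *		floor = IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) ? 2 : 1;
 *	kthread_prio = clamp(kthread_prio, floor, 99);
 */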
4645 
4646 /*
4647  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4648  * replace the definitions in tree.h because those are needed to size
4649  * the ->node array in the rcu_state structure.
4650  */
4651 void rcu_init_geometry(void)
4652 {
4653 	ulong d;
4654 	int i;
4655 	static unsigned long old_nr_cpu_ids;
4656 	int rcu_capacity[RCU_NUM_LVLS];
4657 	static bool initialized;
4658 
4659 	if (initialized) {
4660 		/*
4661 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4662 		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4663 		 */
4664 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4665 		return;
4666 	}
4667 
4668 	old_nr_cpu_ids = nr_cpu_ids;
4669 	initialized = true;
4670 
4671 	/*
4672 	 * Initialize any unspecified boot parameters.
4673 	 * The default values of jiffies_till_first_fqs and
4674 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4675 	 * value, which is a function of HZ, plus one jiffy for each
4676 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4677 	 */
4678 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4679 	if (jiffies_till_first_fqs == ULONG_MAX)
4680 		jiffies_till_first_fqs = d;
4681 	if (jiffies_till_next_fqs == ULONG_MAX)
4682 		jiffies_till_next_fqs = d;
4683 	adjust_jiffies_till_sched_qs();
4684 
4685 	/* If the compile-time values are accurate, just leave. */
4686 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4687 	    nr_cpu_ids == NR_CPUS)
4688 		return;
4689 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4690 		rcu_fanout_leaf, nr_cpu_ids);
4691 
4692 	/*
4693 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4694 	 * and cannot exceed the number of bits in the rcu_node masks.
4695 	 * Complain and fall back to the compile-time values if this
4696 	 * limit is exceeded.
4697 	 */
4698 	if (rcu_fanout_leaf < 2 ||
4699 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4700 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4701 		WARN_ON(1);
4702 		return;
4703 	}
4704 
4705 	/*
4706 	 * Compute the number of CPUs that can be handled by an rcu_node tree
4707 	 * with the given number of levels.
4708 	 */
4709 	rcu_capacity[0] = rcu_fanout_leaf;
4710 	for (i = 1; i < RCU_NUM_LVLS; i++)
4711 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4712 
4713 	/*
4714 	 * The tree must be able to accommodate the configured number of CPUs.
4715 	 * If this limit is exceeded, fall back to the compile-time values.
4716 	 */
4717 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4718 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4719 		WARN_ON(1);
4720 		return;
4721 	}
4722 
4723 	/* Calculate the number of levels in the tree. */
4724 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4725 	}
4726 	rcu_num_lvls = i + 1;
4727 
4728 	/* Calculate the number of rcu_nodes at each level of the tree. */
4729 	for (i = 0; i < rcu_num_lvls; i++) {
4730 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4731 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4732 	}
4733 
4734 	/* Calculate the total number of rcu_node structures. */
4735 	rcu_num_nodes = 0;
4736 	for (i = 0; i < rcu_num_lvls; i++)
4737 		rcu_num_nodes += num_rcu_lvl[i];
4738 }
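
/*
 * Editorial worked example: with RCU_FANOUT = 64, rcu_fanout_leaf = 16,
 * and nr_cpu_ids = 96, the code above computes:
 *
 *	rcu_capacity[0] = 16	(CPUs coverable by a one-level tree)
 *	rcu_capacity[1] = 1024	(CPUs coverable by a two-level tree)
 *
 *	rcu_num_lvls   = 2	(96 > 16 but 96 <= 1024)
 *	num_rcu_lvl[0] = DIV_ROUND_UP(96, 1024) = 1	(root)
 *	num_rcu_lvl[1] = DIV_ROUND_UP(96, 16)   = 6	(leaves)
 *	rcu_num_nodes  = 7
 */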
4739 
4740 /*
4741  * Dump out the structure of the rcu_node combining tree associated
4742  * with the rcu_state structure.
4743  */
4744 static void __init rcu_dump_rcu_node_tree(void)
4745 {
4746 	int level = 0;
4747 	struct rcu_node *rnp;
4748 
4749 	pr_info("rcu_node tree layout dump\n");
4750 	pr_info(" ");
4751 	rcu_for_each_node_breadth_first(rnp) {
4752 		if (rnp->level != level) {
4753 			pr_cont("\n");
4754 			pr_info(" ");
4755 			level = rnp->level;
4756 		}
4757 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4758 	}
4759 	pr_cont("\n");
4760 }
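
/*
 * Editorial example: for the 96-CPU, two-level geometry worked through
 * above, the dump would read roughly as follows, given the
 * "grplo:grphi ^grpnum" format and the "rcu: " pr_fmt() prefix:
 *
 *	rcu: rcu_node tree layout dump
 *	rcu:  0:95 ^0
 *	rcu:  0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3  64:79 ^4  80:95 ^5
 */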
4761 
4762 struct workqueue_struct *rcu_gp_wq;
4763 
4764 static void __init kfree_rcu_batch_init(void)
4765 {
4766 	int cpu;
4767 	int i;
4768 
4769 	/* Clamp the page-cache refill delay to the [0, 100] seconds interval. */
4770 	if (rcu_delay_page_cache_fill_msec < 0 ||
4771 		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4772 
4773 		rcu_delay_page_cache_fill_msec =
4774 			clamp(rcu_delay_page_cache_fill_msec, 0,
4775 				(int) (100 * MSEC_PER_SEC));
4776 
4777 		pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4778 			rcu_delay_page_cache_fill_msec);
4779 	}
4780 
4781 	for_each_possible_cpu(cpu) {
4782 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4783 
4784 		for (i = 0; i < KFREE_N_BATCHES; i++) {
4785 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4786 			krcp->krw_arr[i].krcp = krcp;
4787 		}
4788 
4789 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4790 		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4791 		krcp->initialized = true;
4792 	}
4793 	if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
4794 		pr_err("Failed to register kfree_rcu() shrinker!\n");
4795 }
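
/*
 * Editorial sketch: the per-CPU batching initialized above backs the
 * kfree_rcu() API.  Typical usage (example_obj and example_free are
 * hypothetical):
 *
 *	struct example_obj {
 *		int data;
 *		struct rcu_head rh;	// storage kfree_rcu() may use
 *	};
 *
 *	static void example_free(struct example_obj *p)
 *	{
 *		// Frees p only after a full grace period, so readers that
 *		// fetched it under rcu_read_lock() remain safe.
 *		kfree_rcu(p, rh);
 *	}
 */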
4796 
4797 void __init rcu_init(void)
4798 {
4799 	int cpu = smp_processor_id();
4800 
4801 	rcu_early_boot_tests();
4802 
4803 	kfree_rcu_batch_init();
4804 	rcu_bootup_announce();
4805 	sanitize_kthread_prio();
4806 	rcu_init_geometry();
4807 	rcu_init_one();
4808 	if (dump_tree)
4809 		rcu_dump_rcu_node_tree();
4810 	if (use_softirq)
4811 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4812 
4813 	/*
4814 	 * We don't need protection against CPU-hotplug here because
4815 	 * this is called early in boot, before interrupts are enabled
4816 	 * and before the scheduler is operational.
4817 	 */
4818 	pm_notifier(rcu_pm_notify, 0);
4819 	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4820 	rcutree_prepare_cpu(cpu);
4821 	rcu_cpu_starting(cpu);
4822 	rcutree_online_cpu(cpu);
4823 
4824 	/* Create workqueue for Tree SRCU and for expedited GPs. */
4825 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4826 	WARN_ON(!rcu_gp_wq);
4827 	rcu_alloc_par_gp_wq();
4828 
4829 	/* Fill in default value for rcutree.qovld boot parameter. */
4830 	/* -After- the rcu_node ->lock fields are initialized! */
4831 	if (qovld < 0)
4832 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4833 	else
4834 		qovld_calc = qovld;
4835 
4836 	// Kick-start any polled grace periods that started early.
4837 	if (!(per_cpu_ptr(&rcu_data, cpu)->mynode->exp_seq_poll_rq & 0x1))
4838 		(void)start_poll_synchronize_rcu_expedited();
4839 }
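
/*
 * Editorial sketch: once rcu_init() has run and a later core_initcall()
 * switches RCU to runtime mode, the classic read/update pattern applies.
 * Illustrative only; gp, gp_lock, and the helpers are hypothetical:
 *
 *	struct foo { int a; };
 *	static struct foo __rcu *gp;
 *
 *	// Reader.
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		use(p->a);			// hypothetical consumer
 *	rcu_read_unlock();
 *
 *	// Updater, with gp_lock serializing updates.
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_rcu();			// wait out pre-existing readers
 *	kfree(old);
 */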
4840 
4841 #include "tree_stall.h"
4842 #include "tree_exp.h"
4843 #include "tree_nocb.h"
4844 #include "tree_plugin.h"
4845