xref: /openbmc/linux/kernel/rcu/tree.c (revision a325f174)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  *
5  * Copyright IBM Corporation, 2008
6  *
7  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8  *	    Manfred Spraul <manfred@colorfullife.com>
9  *	    Paul E. McKenney <paulmck@linux.ibm.com>
10  *
11  * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13  *
14  * For detailed explanation of Read-Copy Update mechanism see -
15  *	Documentation/RCU
16  */
17 
18 #define pr_fmt(fmt) "rcu: " fmt
19 
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/moduleparam.h>
35 #include <linux/panic.h>
36 #include <linux/panic_notifier.h>
37 #include <linux/percpu.h>
38 #include <linux/notifier.h>
39 #include <linux/cpu.h>
40 #include <linux/mutex.h>
41 #include <linux/time.h>
42 #include <linux/kernel_stat.h>
43 #include <linux/wait.h>
44 #include <linux/kthread.h>
45 #include <uapi/linux/sched/types.h>
46 #include <linux/prefetch.h>
47 #include <linux/delay.h>
48 #include <linux/random.h>
49 #include <linux/trace_events.h>
50 #include <linux/suspend.h>
51 #include <linux/ftrace.h>
52 #include <linux/tick.h>
53 #include <linux/sysrq.h>
54 #include <linux/kprobes.h>
55 #include <linux/gfp.h>
56 #include <linux/oom.h>
57 #include <linux/smpboot.h>
58 #include <linux/jiffies.h>
59 #include <linux/slab.h>
60 #include <linux/sched/isolation.h>
61 #include <linux/sched/clock.h>
62 #include <linux/vmalloc.h>
63 #include <linux/mm.h>
64 #include <linux/kasan.h>
65 #include <linux/context_tracking.h>
66 #include "../time/tick-internal.h"
67 
68 #include "tree.h"
69 #include "rcu.h"
70 
71 #ifdef MODULE_PARAM_PREFIX
72 #undef MODULE_PARAM_PREFIX
73 #endif
74 #define MODULE_PARAM_PREFIX "rcutree."
75 
76 /* Data structures. */
77 
78 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
79 	.gpwrap = true,
80 #ifdef CONFIG_RCU_NOCB_CPU
81 	.cblist.flags = SEGCBLIST_RCU_CORE,
82 #endif
83 };
84 static struct rcu_state rcu_state = {
85 	.level = { &rcu_state.node[0] },
86 	.gp_state = RCU_GP_IDLE,
87 	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
88 	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
89 	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
90 	.name = RCU_NAME,
91 	.abbr = RCU_ABBR,
92 	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
93 	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
94 	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
95 };
96 
97 /* Dump rcu_node combining tree at boot to verify correct setup. */
98 static bool dump_tree;
99 module_param(dump_tree, bool, 0444);
100 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
101 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
102 #ifndef CONFIG_PREEMPT_RT
103 module_param(use_softirq, bool, 0444);
104 #endif
105 /* Control rcu_node-tree auto-balancing at boot time. */
106 static bool rcu_fanout_exact;
107 module_param(rcu_fanout_exact, bool, 0444);
108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110 module_param(rcu_fanout_leaf, int, 0444);
111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112 /* Number of rcu_nodes at specified level. */
113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
115 
116 /*
117  * The rcu_scheduler_active variable is initialized to the value
118  * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
119  * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
120  * RCU can assume that there is but one task, allowing RCU to (for example)
121  * optimize synchronize_rcu() to a simple barrier().  When this variable
122  * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123  * to detect real grace periods.  This variable is also used to suppress
124  * boot-time false positives from lockdep-RCU error checking.  Finally, it
125  * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126  * is fully initialized, including all of its kthreads having been spawned.
127  */
128 int rcu_scheduler_active __read_mostly;
129 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
130 
131 /*
132  * The rcu_scheduler_fully_active variable transitions from zero to one
133  * during the early_initcall() processing, which is after the scheduler
134  * is capable of creating new tasks.  So RCU processing (for example,
135  * creating tasks for RCU priority boosting) must be delayed until after
136  * rcu_scheduler_fully_active transitions from zero to one.  We also
137  * currently delay invocation of any RCU callbacks until after this point.
138  *
139  * It might later prove better for people registering RCU callbacks during
140  * early boot to take responsibility for these callbacks, but one step at
141  * a time.
142  */
143 static int rcu_scheduler_fully_active __read_mostly;
144 
145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 			      unsigned long gps, unsigned long flags);
147 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
148 static void invoke_rcu_core(void);
149 static void rcu_report_exp_rdp(struct rcu_data *rdp);
150 static void sync_sched_exp_online_cleanup(int cpu);
151 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
152 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
153 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
154 static bool rcu_init_invoked(void);
155 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
156 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
157 
158 /*
159  * rcuc/rcub/rcuop kthread realtime priority. The "rcuop"
160  * real-time priority(enabling/disabling) is controlled by
161  * the extra CONFIG_RCU_NOCB_CPU_CB_BOOST configuration.
162  */
163 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
164 module_param(kthread_prio, int, 0444);
165 
166 /* Delay in jiffies for grace-period initialization delays, debug only. */
167 
168 static int gp_preinit_delay;
169 module_param(gp_preinit_delay, int, 0444);
170 static int gp_init_delay;
171 module_param(gp_init_delay, int, 0444);
172 static int gp_cleanup_delay;
173 module_param(gp_cleanup_delay, int, 0444);
174 
175 // Add delay to rcu_read_unlock() for strict grace periods.
176 static int rcu_unlock_delay;
177 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
178 module_param(rcu_unlock_delay, int, 0444);
179 #endif
180 
181 /*
182  * This rcu parameter is runtime-read-only. It reflects
183  * a minimum allowed number of objects which can be cached
184  * per-CPU. Object size is equal to one page. This value
185  * can be changed at boot time.
186  */
187 static int rcu_min_cached_objs = 5;
188 module_param(rcu_min_cached_objs, int, 0444);
189 
190 // A page shrinker can ask for pages to be freed to make them
191 // available for other parts of the system. This usually happens
192 // under low memory conditions, and in that case we should also
193 // defer page-cache filling for a short time period.
194 //
195 // The default value is 5 seconds, which is long enough to reduce
196 // interference with the shrinker while it asks other systems to
197 // drain their caches.
198 static int rcu_delay_page_cache_fill_msec = 5000;
199 module_param(rcu_delay_page_cache_fill_msec, int, 0444);
200 
201 /* Retrieve RCU kthreads priority for rcutorture */
202 int rcu_get_gp_kthreads_prio(void)
203 {
204 	return kthread_prio;
205 }
206 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
207 
208 /*
209  * Number of grace periods between delays, normalized by the duration of
210  * the delay.  The longer the delay, the more the grace periods between
211  * each delay.  The reason for this normalization is that it means that,
212  * for non-zero delays, the overall slowdown of grace periods is constant
213  * regardless of the duration of the delay.  This arrangement balances
214  * the need for long delays to increase some race probabilities with the
215  * need for fast grace periods to increase other race probabilities.
216  */
217 #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
218 
219 /*
220  * Return true if an RCU grace period is in progress.  The READ_ONCE()s
221  * permit this function to be invoked without holding the root rcu_node
222  * structure's ->lock, but of course results can be subject to change.
223  */
224 static int rcu_gp_in_progress(void)
225 {
226 	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
227 }
228 
229 /*
230  * Return the number of callbacks queued on the specified CPU.
231  * Handles both the nocbs and normal cases.
232  */
233 static long rcu_get_n_cbs_cpu(int cpu)
234 {
235 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
236 
237 	if (rcu_segcblist_is_enabled(&rdp->cblist))
238 		return rcu_segcblist_n_cbs(&rdp->cblist);
239 	return 0;
240 }
241 
242 void rcu_softirq_qs(void)
243 {
244 	rcu_qs();
245 	rcu_preempt_deferred_qs(current);
246 	rcu_tasks_qs(current, false);
247 }
248 
249 /*
250  * Reset the current CPU's ->dynticks counter to indicate that the
251  * newly onlined CPU is no longer in an extended quiescent state.
252  * This will either leave the counter unchanged, or increment it
253  * to the next non-quiescent value.
254  *
255  * The non-atomic test/increment sequence works because the upper bits
256  * of the ->dynticks counter are manipulated only by the corresponding CPU,
257  * or when the corresponding CPU is offline.
258  */
259 static void rcu_dynticks_eqs_online(void)
260 {
261 	if (ct_dynticks() & RCU_DYNTICKS_IDX)
262 		return;
263 	ct_state_inc(RCU_DYNTICKS_IDX);
264 }
265 
266 /*
267  * Snapshot the ->dynticks counter with full ordering so as to allow
268  * stable comparison of this counter with past and future snapshots.
269  */
270 static int rcu_dynticks_snap(int cpu)
271 {
272 	smp_mb();  // Fundamental RCU ordering guarantee.
273 	return ct_dynticks_cpu_acquire(cpu);
274 }
275 
276 /*
277  * Return true if the snapshot returned from rcu_dynticks_snap()
278  * indicates that RCU is in an extended quiescent state.
279  */
280 static bool rcu_dynticks_in_eqs(int snap)
281 {
282 	return !(snap & RCU_DYNTICKS_IDX);
283 }
284 
285 /*
286  * Return true if the CPU corresponding to the specified rcu_data
287  * structure has spent some time in an extended quiescent state since
288  * rcu_dynticks_snap() returned the specified snapshot.
289  */
290 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
291 {
292 	return snap != rcu_dynticks_snap(rdp->cpu);
293 }
294 
295 /*
296  * Return true if the referenced integer is zero while the specified
297  * CPU remains within a single extended quiescent state.
298  */
299 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
300 {
301 	int snap;
302 
303 	// If not quiescent, force back to earlier extended quiescent state.
304 	snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
305 	smp_rmb(); // Order ->dynticks and *vp reads.
306 	if (READ_ONCE(*vp))
307 		return false;  // Non-zero, so report failure;
308 	smp_rmb(); // Order *vp read and ->dynticks re-read.
309 
310 	// If still in the same extended quiescent state, we are good!
311 	return snap == ct_dynticks_cpu(cpu);
312 }
313 
314 /*
315  * Let the RCU core know that this CPU has gone through the scheduler,
316  * which is a quiescent state.  This is called when the need for a
317  * quiescent state is urgent, so we burn an atomic operation and full
318  * memory barriers to let the RCU core know about it, regardless of what
319  * this CPU might (or might not) do in the near future.
320  *
321  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
322  *
323  * The caller must have disabled interrupts and must not be idle.
324  */
325 notrace void rcu_momentary_dyntick_idle(void)
326 {
327 	int seq;
328 
329 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
330 	seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
331 	/* It is illegal to call this from idle state. */
332 	WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
333 	rcu_preempt_deferred_qs(current);
334 }
335 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
336 
337 /**
338  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
339  *
340  * If the current CPU is idle and running at a first-level (not nested)
341  * interrupt, or directly, from idle, return true.
342  *
343  * The caller must have at least disabled IRQs.
344  */
345 static int rcu_is_cpu_rrupt_from_idle(void)
346 {
347 	long nesting;
348 
349 	/*
350 	 * Usually called from the tick; but also used from smp_function_call()
351 	 * for expedited grace periods. This latter can result in running from
352 	 * the idle task, instead of an actual IPI.
353 	 */
354 	lockdep_assert_irqs_disabled();
355 
356 	/* Check for counter underflows */
357 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
358 			 "RCU dynticks_nesting counter underflow!");
359 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
360 			 "RCU dynticks_nmi_nesting counter underflow/zero!");
361 
362 	/* Are we at first interrupt nesting level? */
363 	nesting = ct_dynticks_nmi_nesting();
364 	if (nesting > 1)
365 		return false;
366 
367 	/*
368 	 * If we're not in an interrupt, we must be in the idle task!
369 	 */
370 	WARN_ON_ONCE(!nesting && !is_idle_task(current));
371 
372 	/* Does CPU appear to be idle from an RCU standpoint? */
373 	return ct_dynticks_nesting() == 0;
374 }
375 
376 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
377 				// Maximum callbacks per rcu_do_batch ...
378 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
379 static long blimit = DEFAULT_RCU_BLIMIT;
380 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
381 static long qhimark = DEFAULT_RCU_QHIMARK;
382 #define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
383 static long qlowmark = DEFAULT_RCU_QLOMARK;
384 #define DEFAULT_RCU_QOVLD_MULT 2
385 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
386 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
387 static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!
388 
389 module_param(blimit, long, 0444);
390 module_param(qhimark, long, 0444);
391 module_param(qlowmark, long, 0444);
392 module_param(qovld, long, 0444);
393 
394 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
395 static ulong jiffies_till_next_fqs = ULONG_MAX;
396 static bool rcu_kick_kthreads;
397 static int rcu_divisor = 7;
398 module_param(rcu_divisor, int, 0644);
399 
400 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
401 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
402 module_param(rcu_resched_ns, long, 0644);
403 
404 /*
405  * How long the grace period must be before we start recruiting
406  * quiescent-state help from rcu_note_context_switch().
407  */
408 static ulong jiffies_till_sched_qs = ULONG_MAX;
409 module_param(jiffies_till_sched_qs, ulong, 0444);
410 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
411 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
412 
413 /*
414  * Make sure that we give the grace-period kthread time to detect any
415  * idle CPUs before taking active measures to force quiescent states.
416  * However, don't go below 100 milliseconds, adjusted upwards for really
417  * large systems.
418  */
419 static void adjust_jiffies_till_sched_qs(void)
420 {
421 	unsigned long j;
422 
423 	/* If jiffies_till_sched_qs was specified, respect the request. */
424 	if (jiffies_till_sched_qs != ULONG_MAX) {
425 		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
426 		return;
427 	}
428 	/* Otherwise, set to third fqs scan, but bound below on large system. */
429 	j = READ_ONCE(jiffies_till_first_fqs) +
430 		      2 * READ_ONCE(jiffies_till_next_fqs);
431 	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
432 		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
433 	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
434 	WRITE_ONCE(jiffies_to_sched_qs, j);
435 }
436 
437 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
438 {
439 	ulong j;
440 	int ret = kstrtoul(val, 0, &j);
441 
442 	if (!ret) {
443 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
444 		adjust_jiffies_till_sched_qs();
445 	}
446 	return ret;
447 }
448 
449 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
450 {
451 	ulong j;
452 	int ret = kstrtoul(val, 0, &j);
453 
454 	if (!ret) {
455 		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
456 		adjust_jiffies_till_sched_qs();
457 	}
458 	return ret;
459 }
460 
461 static const struct kernel_param_ops first_fqs_jiffies_ops = {
462 	.set = param_set_first_fqs_jiffies,
463 	.get = param_get_ulong,
464 };
465 
466 static const struct kernel_param_ops next_fqs_jiffies_ops = {
467 	.set = param_set_next_fqs_jiffies,
468 	.get = param_get_ulong,
469 };
470 
471 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
472 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
473 module_param(rcu_kick_kthreads, bool, 0644);
474 
475 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
476 static int rcu_pending(int user);
477 
478 /*
479  * Return the number of RCU GPs completed thus far for debug & stats.
480  */
481 unsigned long rcu_get_gp_seq(void)
482 {
483 	return READ_ONCE(rcu_state.gp_seq);
484 }
485 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
486 
487 /*
488  * Return the number of RCU expedited batches completed thus far for
489  * debug & stats.  Odd numbers mean that a batch is in progress, even
490  * numbers mean idle.  The value returned will thus be roughly double
491  * the cumulative batches since boot.
492  */
493 unsigned long rcu_exp_batches_completed(void)
494 {
495 	return rcu_state.expedited_sequence;
496 }
497 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
498 
499 /*
500  * Return the root node of the rcu_state structure.
501  */
502 static struct rcu_node *rcu_get_root(void)
503 {
504 	return &rcu_state.node[0];
505 }
506 
507 /*
508  * Send along grace-period-related data for rcutorture diagnostics.
509  */
510 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
511 			    unsigned long *gp_seq)
512 {
513 	switch (test_type) {
514 	case RCU_FLAVOR:
515 		*flags = READ_ONCE(rcu_state.gp_flags);
516 		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
517 		break;
518 	default:
519 		break;
520 	}
521 }
522 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
523 
524 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
525 /*
526  * An empty function that will trigger a reschedule on
527  * IRQ tail once IRQs get re-enabled on userspace/guest resume.
528  */
529 static void late_wakeup_func(struct irq_work *work)
530 {
531 }
532 
533 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
534 	IRQ_WORK_INIT(late_wakeup_func);
535 
536 /*
537  * If either:
538  *
539  * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
540  * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
541  *
542  * In these cases the late RCU wake ups aren't supported in the resched loops and our
543  * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
544  * get re-enabled again.
545  */
546 noinstr void rcu_irq_work_resched(void)
547 {
548 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
549 
550 	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
551 		return;
552 
553 	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
554 		return;
555 
556 	instrumentation_begin();
557 	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
558 		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
559 	}
560 	instrumentation_end();
561 }
562 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
563 
564 #ifdef CONFIG_PROVE_RCU
565 /**
566  * rcu_irq_exit_check_preempt - Validate that scheduling is possible
567  */
568 void rcu_irq_exit_check_preempt(void)
569 {
570 	lockdep_assert_irqs_disabled();
571 
572 	RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
573 			 "RCU dynticks_nesting counter underflow/zero!");
574 	RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
575 			 DYNTICK_IRQ_NONIDLE,
576 			 "Bad RCU  dynticks_nmi_nesting counter\n");
577 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
578 			 "RCU in extended quiescent state!");
579 }
580 #endif /* #ifdef CONFIG_PROVE_RCU */
581 
582 #ifdef CONFIG_NO_HZ_FULL
583 /**
584  * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
585  *
586  * The scheduler tick is not normally enabled when CPUs enter the kernel
587  * from nohz_full userspace execution.  After all, nohz_full userspace
588  * execution is an RCU quiescent state and the time executing in the kernel
589  * is quite short.  Except of course when it isn't.  And it is not hard to
590  * cause a large system to spend tens of seconds or even minutes looping
591  * in the kernel, which can cause a number of problems, include RCU CPU
592  * stall warnings.
593  *
594  * Therefore, if a nohz_full CPU fails to report a quiescent state
595  * in a timely manner, the RCU grace-period kthread sets that CPU's
596  * ->rcu_urgent_qs flag with the expectation that the next interrupt or
597  * exception will invoke this function, which will turn on the scheduler
598  * tick, which will enable RCU to detect that CPU's quiescent states,
599  * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
600  * The tick will be disabled once a quiescent state is reported for
601  * this CPU.
602  *
603  * Of course, in carefully tuned systems, there might never be an
604  * interrupt or exception.  In that case, the RCU grace-period kthread
605  * will eventually cause one to happen.  However, in less carefully
606  * controlled environments, this function allows RCU to get what it
607  * needs without creating otherwise useless interruptions.
608  */
609 void __rcu_irq_enter_check_tick(void)
610 {
611 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
612 
613 	// If we're here from NMI there's nothing to do.
614 	if (in_nmi())
615 		return;
616 
617 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
618 			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
619 
620 	if (!tick_nohz_full_cpu(rdp->cpu) ||
621 	    !READ_ONCE(rdp->rcu_urgent_qs) ||
622 	    READ_ONCE(rdp->rcu_forced_tick)) {
623 		// RCU doesn't need nohz_full help from this CPU, or it is
624 		// already getting that help.
625 		return;
626 	}
627 
628 	// We get here only when not in an extended quiescent state and
629 	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
630 	// already watching and (2) The fact that we are in an interrupt
631 	// handler and that the rcu_node lock is an irq-disabled lock
632 	// prevents self-deadlock.  So we can safely recheck under the lock.
633 	// Note that the nohz_full state currently cannot change.
634 	raw_spin_lock_rcu_node(rdp->mynode);
635 	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
636 		// A nohz_full CPU is in the kernel and RCU needs a
637 		// quiescent state.  Turn on the tick!
638 		WRITE_ONCE(rdp->rcu_forced_tick, true);
639 		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
640 	}
641 	raw_spin_unlock_rcu_node(rdp->mynode);
642 }
643 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
644 #endif /* CONFIG_NO_HZ_FULL */
645 
646 /*
647  * Check to see if any future non-offloaded RCU-related work will need
648  * to be done by the current CPU, even if none need be done immediately,
649  * returning 1 if so.  This function is part of the RCU implementation;
650  * it is -not- an exported member of the RCU API.  This is used by
651  * the idle-entry code to figure out whether it is safe to disable the
652  * scheduler-clock interrupt.
653  *
654  * Just check whether or not this CPU has non-offloaded RCU callbacks
655  * queued.
656  */
657 int rcu_needs_cpu(void)
658 {
659 	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
660 		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
661 }
662 
663 /*
664  * If any sort of urgency was applied to the current CPU (for example,
665  * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
666  * to get to a quiescent state, disable it.
667  */
668 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
669 {
670 	raw_lockdep_assert_held_rcu_node(rdp->mynode);
671 	WRITE_ONCE(rdp->rcu_urgent_qs, false);
672 	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
673 	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
674 		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
675 		WRITE_ONCE(rdp->rcu_forced_tick, false);
676 	}
677 }
678 
679 /**
680  * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
681  *
682  * Return @true if RCU is watching the running CPU and @false otherwise.
683  * An @true return means that this CPU can safely enter RCU read-side
684  * critical sections.
685  *
686  * Although calls to rcu_is_watching() from most parts of the kernel
687  * will return @true, there are important exceptions.  For example, if the
688  * current CPU is deep within its idle loop, in kernel entry/exit code,
689  * or offline, rcu_is_watching() will return @false.
690  *
691  * Make notrace because it can be called by the internal functions of
692  * ftrace, and making this notrace removes unnecessary recursion calls.
693  */
694 notrace bool rcu_is_watching(void)
695 {
696 	bool ret;
697 
698 	preempt_disable_notrace();
699 	ret = !rcu_dynticks_curr_cpu_in_eqs();
700 	preempt_enable_notrace();
701 	return ret;
702 }
703 EXPORT_SYMBOL_GPL(rcu_is_watching);
704 
705 /*
706  * If a holdout task is actually running, request an urgent quiescent
707  * state from its CPU.  This is unsynchronized, so migrations can cause
708  * the request to go to the wrong CPU.  Which is OK, all that will happen
709  * is that the CPU's next context switch will be a bit slower and next
710  * time around this task will generate another request.
711  */
712 void rcu_request_urgent_qs_task(struct task_struct *t)
713 {
714 	int cpu;
715 
716 	barrier();
717 	cpu = task_cpu(t);
718 	if (!task_curr(t))
719 		return; /* This task is not running on that CPU. */
720 	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
721 }
722 
723 /*
724  * When trying to report a quiescent state on behalf of some other CPU,
725  * it is our responsibility to check for and handle potential overflow
726  * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
727  * After all, the CPU might be in deep idle state, and thus executing no
728  * code whatsoever.
729  */
730 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
731 {
732 	raw_lockdep_assert_held_rcu_node(rnp);
733 	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
734 			 rnp->gp_seq))
735 		WRITE_ONCE(rdp->gpwrap, true);
736 	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
737 		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
738 }
739 
740 /*
741  * Snapshot the specified CPU's dynticks counter so that we can later
742  * credit them with an implicit quiescent state.  Return 1 if this CPU
743  * is in dynticks idle mode, which is an extended quiescent state.
744  */
745 static int dyntick_save_progress_counter(struct rcu_data *rdp)
746 {
747 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
748 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
749 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
750 		rcu_gpnum_ovf(rdp->mynode, rdp);
751 		return 1;
752 	}
753 	return 0;
754 }
755 
756 /*
757  * Return true if the specified CPU has passed through a quiescent
758  * state by virtue of being in or having passed through an dynticks
759  * idle state since the last call to dyntick_save_progress_counter()
760  * for this same CPU, or by virtue of having been offline.
761  */
762 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
763 {
764 	unsigned long jtsq;
765 	struct rcu_node *rnp = rdp->mynode;
766 
767 	/*
768 	 * If the CPU passed through or entered a dynticks idle phase with
769 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
770 	 * already acknowledged the request to pass through a quiescent
771 	 * state.  Either way, that CPU cannot possibly be in an RCU
772 	 * read-side critical section that started before the beginning
773 	 * of the current RCU grace period.
774 	 */
775 	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
776 		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
777 		rcu_gpnum_ovf(rnp, rdp);
778 		return 1;
779 	}
780 
781 	/*
782 	 * Complain if a CPU that is considered to be offline from RCU's
783 	 * perspective has not yet reported a quiescent state.  After all,
784 	 * the offline CPU should have reported a quiescent state during
785 	 * the CPU-offline process, or, failing that, by rcu_gp_init()
786 	 * if it ran concurrently with either the CPU going offline or the
787 	 * last task on a leaf rcu_node structure exiting its RCU read-side
788 	 * critical section while all CPUs corresponding to that structure
789 	 * are offline.  This added warning detects bugs in any of these
790 	 * code paths.
791 	 *
792 	 * The rcu_node structure's ->lock is held here, which excludes
793 	 * the relevant portions the CPU-hotplug code, the grace-period
794 	 * initialization code, and the rcu_read_unlock() code paths.
795 	 *
796 	 * For more detail, please refer to the "Hotplug CPU" section
797 	 * of RCU's Requirements documentation.
798 	 */
799 	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
800 		struct rcu_node *rnp1;
801 
802 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
803 			__func__, rnp->grplo, rnp->grphi, rnp->level,
804 			(long)rnp->gp_seq, (long)rnp->completedqs);
805 		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
806 			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
807 				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
808 		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
809 			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
810 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
811 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
812 		return 1; /* Break things loose after complaining. */
813 	}
814 
815 	/*
816 	 * A CPU running for an extended time within the kernel can
817 	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
818 	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
819 	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
820 	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
821 	 * variable are safe because the assignments are repeated if this
822 	 * CPU failed to pass through a quiescent state.  This code
823 	 * also checks .jiffies_resched in case jiffies_to_sched_qs
824 	 * is set way high.
825 	 */
826 	jtsq = READ_ONCE(jiffies_to_sched_qs);
827 	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
828 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
829 	     time_after(jiffies, rcu_state.jiffies_resched) ||
830 	     rcu_state.cbovld)) {
831 		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
832 		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
833 		smp_store_release(&rdp->rcu_urgent_qs, true);
834 	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
835 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
836 	}
837 
838 	/*
839 	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
840 	 * The above code handles this, but only for straight cond_resched().
841 	 * And some in-kernel loops check need_resched() before calling
842 	 * cond_resched(), which defeats the above code for CPUs that are
843 	 * running in-kernel with scheduling-clock interrupts disabled.
844 	 * So hit them over the head with the resched_cpu() hammer!
845 	 */
846 	if (tick_nohz_full_cpu(rdp->cpu) &&
847 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
848 	     rcu_state.cbovld)) {
849 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
850 		resched_cpu(rdp->cpu);
851 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
852 	}
853 
854 	/*
855 	 * If more than halfway to RCU CPU stall-warning time, invoke
856 	 * resched_cpu() more frequently to try to loosen things up a bit.
857 	 * Also check to see if the CPU is getting hammered with interrupts,
858 	 * but only once per grace period, just to keep the IPIs down to
859 	 * a dull roar.
860 	 */
861 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
862 		if (time_after(jiffies,
863 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
864 			resched_cpu(rdp->cpu);
865 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
866 		}
867 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
868 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
869 		    (rnp->ffmask & rdp->grpmask)) {
870 			rdp->rcu_iw_pending = true;
871 			rdp->rcu_iw_gp_seq = rnp->gp_seq;
872 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
873 		}
874 
875 		if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
876 			int cpu = rdp->cpu;
877 			struct rcu_snap_record *rsrp;
878 			struct kernel_cpustat *kcsp;
879 
880 			kcsp = &kcpustat_cpu(cpu);
881 
882 			rsrp = &rdp->snap_record;
883 			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
884 			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
885 			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
886 			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
887 			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
888 			rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
889 			rsrp->jiffies = jiffies;
890 			rsrp->gp_seq = rdp->gp_seq;
891 		}
892 	}
893 
894 	return 0;
895 }
896 
897 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
898 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
899 			      unsigned long gp_seq_req, const char *s)
900 {
901 	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
902 				      gp_seq_req, rnp->level,
903 				      rnp->grplo, rnp->grphi, s);
904 }
905 
906 /*
907  * rcu_start_this_gp - Request the start of a particular grace period
908  * @rnp_start: The leaf node of the CPU from which to start.
909  * @rdp: The rcu_data corresponding to the CPU from which to start.
910  * @gp_seq_req: The gp_seq of the grace period to start.
911  *
912  * Start the specified grace period, as needed to handle newly arrived
913  * callbacks.  The required future grace periods are recorded in each
914  * rcu_node structure's ->gp_seq_needed field.  Returns true if there
915  * is reason to awaken the grace-period kthread.
916  *
917  * The caller must hold the specified rcu_node structure's ->lock, which
918  * is why the caller is responsible for waking the grace-period kthread.
919  *
920  * Returns true if the GP thread needs to be awakened else false.
921  */
922 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
923 			      unsigned long gp_seq_req)
924 {
925 	bool ret = false;
926 	struct rcu_node *rnp;
927 
928 	/*
929 	 * Use funnel locking to either acquire the root rcu_node
930 	 * structure's lock or bail out if the need for this grace period
931 	 * has already been recorded -- or if that grace period has in
932 	 * fact already started.  If there is already a grace period in
933 	 * progress in a non-leaf node, no recording is needed because the
934 	 * end of the grace period will scan the leaf rcu_node structures.
935 	 * Note that rnp_start->lock must not be released.
936 	 */
937 	raw_lockdep_assert_held_rcu_node(rnp_start);
938 	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
939 	for (rnp = rnp_start; 1; rnp = rnp->parent) {
940 		if (rnp != rnp_start)
941 			raw_spin_lock_rcu_node(rnp);
942 		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
943 		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
944 		    (rnp != rnp_start &&
945 		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
946 			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
947 					  TPS("Prestarted"));
948 			goto unlock_out;
949 		}
950 		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
951 		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
952 			/*
953 			 * We just marked the leaf or internal node, and a
954 			 * grace period is in progress, which means that
955 			 * rcu_gp_cleanup() will see the marking.  Bail to
956 			 * reduce contention.
957 			 */
958 			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
959 					  TPS("Startedleaf"));
960 			goto unlock_out;
961 		}
962 		if (rnp != rnp_start && rnp->parent != NULL)
963 			raw_spin_unlock_rcu_node(rnp);
964 		if (!rnp->parent)
965 			break;  /* At root, and perhaps also leaf. */
966 	}
967 
968 	/* If GP already in progress, just leave, otherwise start one. */
969 	if (rcu_gp_in_progress()) {
970 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
971 		goto unlock_out;
972 	}
973 	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
974 	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
975 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
976 	if (!READ_ONCE(rcu_state.gp_kthread)) {
977 		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
978 		goto unlock_out;
979 	}
980 	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
981 	ret = true;  /* Caller must wake GP kthread. */
982 unlock_out:
983 	/* Push furthest requested GP to leaf node and rcu_data structure. */
984 	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
985 		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
986 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
987 	}
988 	if (rnp != rnp_start)
989 		raw_spin_unlock_rcu_node(rnp);
990 	return ret;
991 }
992 
993 /*
994  * Clean up any old requests for the just-ended grace period.  Also return
995  * whether any additional grace periods have been requested.
996  */
997 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
998 {
999 	bool needmore;
1000 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1001 
1002 	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1003 	if (!needmore)
1004 		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1005 	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1006 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1007 	return needmore;
1008 }
1009 
1010 /*
1011  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1012  * interrupt or softirq handler, in which case we just might immediately
1013  * sleep upon return, resulting in a grace-period hang), and don't bother
1014  * awakening when there is nothing for the grace-period kthread to do
1015  * (as in several CPUs raced to awaken, we lost), and finally don't try
1016  * to awaken a kthread that has not yet been created.  If all those checks
1017  * are passed, track some debug information and awaken.
1018  *
1019  * So why do the self-wakeup when in an interrupt or softirq handler
1020  * in the grace-period kthread's context?  Because the kthread might have
1021  * been interrupted just as it was going to sleep, and just after the final
1022  * pre-sleep check of the awaken condition.  In this case, a wakeup really
1023  * is required, and is therefore supplied.
1024  */
1025 static void rcu_gp_kthread_wake(void)
1026 {
1027 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1028 
1029 	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1030 	    !READ_ONCE(rcu_state.gp_flags) || !t)
1031 		return;
1032 	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1033 	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1034 	swake_up_one(&rcu_state.gp_wq);
1035 }
1036 
1037 /*
1038  * If there is room, assign a ->gp_seq number to any callbacks on this
1039  * CPU that have not already been assigned.  Also accelerate any callbacks
1040  * that were previously assigned a ->gp_seq number that has since proven
1041  * to be too conservative, which can happen if callbacks get assigned a
1042  * ->gp_seq number while RCU is idle, but with reference to a non-root
1043  * rcu_node structure.  This function is idempotent, so it does not hurt
1044  * to call it repeatedly.  Returns an flag saying that we should awaken
1045  * the RCU grace-period kthread.
1046  *
1047  * The caller must hold rnp->lock with interrupts disabled.
1048  */
1049 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1050 {
1051 	unsigned long gp_seq_req;
1052 	bool ret = false;
1053 
1054 	rcu_lockdep_assert_cblist_protected(rdp);
1055 	raw_lockdep_assert_held_rcu_node(rnp);
1056 
1057 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1058 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1059 		return false;
1060 
1061 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1062 
1063 	/*
1064 	 * Callbacks are often registered with incomplete grace-period
1065 	 * information.  Something about the fact that getting exact
1066 	 * information requires acquiring a global lock...  RCU therefore
1067 	 * makes a conservative estimate of the grace period number at which
1068 	 * a given callback will become ready to invoke.	The following
1069 	 * code checks this estimate and improves it when possible, thus
1070 	 * accelerating callback invocation to an earlier grace-period
1071 	 * number.
1072 	 */
1073 	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1074 	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1075 		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1076 
1077 	/* Trace depending on how much we were able to accelerate. */
1078 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1079 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1080 	else
1081 		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1082 
1083 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1084 
1085 	return ret;
1086 }
1087 
1088 /*
1089  * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1090  * rcu_node structure's ->lock be held.  It consults the cached value
1091  * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1092  * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1093  * while holding the leaf rcu_node structure's ->lock.
1094  */
1095 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1096 					struct rcu_data *rdp)
1097 {
1098 	unsigned long c;
1099 	bool needwake;
1100 
1101 	rcu_lockdep_assert_cblist_protected(rdp);
1102 	c = rcu_seq_snap(&rcu_state.gp_seq);
1103 	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1104 		/* Old request still live, so mark recent callbacks. */
1105 		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
1106 		return;
1107 	}
1108 	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1109 	needwake = rcu_accelerate_cbs(rnp, rdp);
1110 	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1111 	if (needwake)
1112 		rcu_gp_kthread_wake();
1113 }
1114 
1115 /*
1116  * Move any callbacks whose grace period has completed to the
1117  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1118  * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1119  * sublist.  This function is idempotent, so it does not hurt to
1120  * invoke it repeatedly.  As long as it is not invoked -too- often...
1121  * Returns true if the RCU grace-period kthread needs to be awakened.
1122  *
1123  * The caller must hold rnp->lock with interrupts disabled.
1124  */
1125 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1126 {
1127 	rcu_lockdep_assert_cblist_protected(rdp);
1128 	raw_lockdep_assert_held_rcu_node(rnp);
1129 
1130 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1131 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1132 		return false;
1133 
1134 	/*
1135 	 * Find all callbacks whose ->gp_seq numbers indicate that they
1136 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1137 	 */
1138 	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1139 
1140 	/* Classify any remaining callbacks. */
1141 	return rcu_accelerate_cbs(rnp, rdp);
1142 }
1143 
1144 /*
1145  * Move and classify callbacks, but only if doing so won't require
1146  * that the RCU grace-period kthread be awakened.
1147  */
1148 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1149 						  struct rcu_data *rdp)
1150 {
1151 	rcu_lockdep_assert_cblist_protected(rdp);
1152 	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1153 		return;
1154 	// The grace period cannot end while we hold the rcu_node lock.
1155 	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1156 		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1157 	raw_spin_unlock_rcu_node(rnp);
1158 }
1159 
1160 /*
1161  * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1162  * quiescent state.  This is intended to be invoked when the CPU notices
1163  * a new grace period.
1164  */
1165 static void rcu_strict_gp_check_qs(void)
1166 {
1167 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1168 		rcu_read_lock();
1169 		rcu_read_unlock();
1170 	}
1171 }
1172 
1173 /*
1174  * Update CPU-local rcu_data state to record the beginnings and ends of
1175  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1176  * structure corresponding to the current CPU, and must have irqs disabled.
1177  * Returns true if the grace-period kthread needs to be awakened.
1178  */
1179 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1180 {
1181 	bool ret = false;
1182 	bool need_qs;
1183 	const bool offloaded = rcu_rdp_is_offloaded(rdp);
1184 
1185 	raw_lockdep_assert_held_rcu_node(rnp);
1186 
1187 	if (rdp->gp_seq == rnp->gp_seq)
1188 		return false; /* Nothing to do. */
1189 
1190 	/* Handle the ends of any preceding grace periods first. */
1191 	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1192 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1193 		if (!offloaded)
1194 			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1195 		rdp->core_needs_qs = false;
1196 		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1197 	} else {
1198 		if (!offloaded)
1199 			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1200 		if (rdp->core_needs_qs)
1201 			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1202 	}
1203 
1204 	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
1205 	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1206 	    unlikely(READ_ONCE(rdp->gpwrap))) {
1207 		/*
1208 		 * If the current grace period is waiting for this CPU,
1209 		 * set up to detect a quiescent state, otherwise don't
1210 		 * go looking for one.
1211 		 */
1212 		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1213 		need_qs = !!(rnp->qsmask & rdp->grpmask);
1214 		rdp->cpu_no_qs.b.norm = need_qs;
1215 		rdp->core_needs_qs = need_qs;
1216 		zero_cpu_stall_ticks(rdp);
1217 	}
1218 	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1219 	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1220 		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1221 	if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1222 		WRITE_ONCE(rdp->last_sched_clock, jiffies);
1223 	WRITE_ONCE(rdp->gpwrap, false);
1224 	rcu_gpnum_ovf(rnp, rdp);
1225 	return ret;
1226 }
1227 
1228 static void note_gp_changes(struct rcu_data *rdp)
1229 {
1230 	unsigned long flags;
1231 	bool needwake;
1232 	struct rcu_node *rnp;
1233 
1234 	local_irq_save(flags);
1235 	rnp = rdp->mynode;
1236 	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1237 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1238 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1239 		local_irq_restore(flags);
1240 		return;
1241 	}
1242 	needwake = __note_gp_changes(rnp, rdp);
1243 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1244 	rcu_strict_gp_check_qs();
1245 	if (needwake)
1246 		rcu_gp_kthread_wake();
1247 }
1248 
1249 static atomic_t *rcu_gp_slow_suppress;
1250 
1251 /* Register a counter to suppress debugging grace-period delays. */
1252 void rcu_gp_slow_register(atomic_t *rgssp)
1253 {
1254 	WARN_ON_ONCE(rcu_gp_slow_suppress);
1255 
1256 	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1257 }
1258 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1259 
1260 /* Unregister a counter, with NULL for not caring which. */
1261 void rcu_gp_slow_unregister(atomic_t *rgssp)
1262 {
1263 	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
1264 
1265 	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1266 }
1267 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1268 
1269 static bool rcu_gp_slow_is_suppressed(void)
1270 {
1271 	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1272 
1273 	return rgssp && atomic_read(rgssp);
1274 }
1275 
1276 static void rcu_gp_slow(int delay)
1277 {
1278 	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1279 	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1280 		schedule_timeout_idle(delay);
1281 }
1282 
1283 static unsigned long sleep_duration;
1284 
1285 /* Allow rcutorture to stall the grace-period kthread. */
1286 void rcu_gp_set_torture_wait(int duration)
1287 {
1288 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1289 		WRITE_ONCE(sleep_duration, duration);
1290 }
1291 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1292 
1293 /* Actually implement the aforementioned wait. */
1294 static void rcu_gp_torture_wait(void)
1295 {
1296 	unsigned long duration;
1297 
1298 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1299 		return;
1300 	duration = xchg(&sleep_duration, 0UL);
1301 	if (duration > 0) {
1302 		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1303 		schedule_timeout_idle(duration);
1304 		pr_alert("%s: Wait complete\n", __func__);
1305 	}
1306 }
1307 
1308 /*
1309  * Handler for on_each_cpu() to invoke the target CPU's RCU core
1310  * processing.
1311  */
1312 static void rcu_strict_gp_boundary(void *unused)
1313 {
1314 	invoke_rcu_core();
1315 }
1316 
1317 // Make the polled API aware of the beginning of a grace period.
1318 static void rcu_poll_gp_seq_start(unsigned long *snap)
1319 {
1320 	struct rcu_node *rnp = rcu_get_root();
1321 
1322 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1323 		raw_lockdep_assert_held_rcu_node(rnp);
1324 
1325 	// If RCU was idle, note beginning of GP.
1326 	if (!rcu_seq_state(rcu_state.gp_seq_polled))
1327 		rcu_seq_start(&rcu_state.gp_seq_polled);
1328 
1329 	// Either way, record current state.
1330 	*snap = rcu_state.gp_seq_polled;
1331 }
1332 
1333 // Make the polled API aware of the end of a grace period.
1334 static void rcu_poll_gp_seq_end(unsigned long *snap)
1335 {
1336 	struct rcu_node *rnp = rcu_get_root();
1337 
1338 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1339 		raw_lockdep_assert_held_rcu_node(rnp);
1340 
1341 	// If the previously noted GP is still in effect, record the
1342 	// end of that GP.  Either way, zero counter to avoid counter-wrap
1343 	// problems.
1344 	if (*snap && *snap == rcu_state.gp_seq_polled) {
1345 		rcu_seq_end(&rcu_state.gp_seq_polled);
1346 		rcu_state.gp_seq_polled_snap = 0;
1347 		rcu_state.gp_seq_polled_exp_snap = 0;
1348 	} else {
1349 		*snap = 0;
1350 	}
1351 }
1352 
1353 // Make the polled API aware of the beginning of a grace period, but
1354 // where caller does not hold the root rcu_node structure's lock.
1355 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1356 {
1357 	unsigned long flags;
1358 	struct rcu_node *rnp = rcu_get_root();
1359 
1360 	if (rcu_init_invoked()) {
1361 		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1362 			lockdep_assert_irqs_enabled();
1363 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1364 	}
1365 	rcu_poll_gp_seq_start(snap);
1366 	if (rcu_init_invoked())
1367 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1368 }
1369 
1370 // Make the polled API aware of the end of a grace period, but where
1371 // caller does not hold the root rcu_node structure's lock.
1372 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1373 {
1374 	unsigned long flags;
1375 	struct rcu_node *rnp = rcu_get_root();
1376 
1377 	if (rcu_init_invoked()) {
1378 		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1379 			lockdep_assert_irqs_enabled();
1380 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1381 	}
1382 	rcu_poll_gp_seq_end(snap);
1383 	if (rcu_init_invoked())
1384 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1385 }
1386 
1387 /*
1388  * Initialize a new grace period.  Return false if no grace period required.
1389  */
1390 static noinline_for_stack bool rcu_gp_init(void)
1391 {
1392 	unsigned long flags;
1393 	unsigned long oldmask;
1394 	unsigned long mask;
1395 	struct rcu_data *rdp;
1396 	struct rcu_node *rnp = rcu_get_root();
1397 
1398 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1399 	raw_spin_lock_irq_rcu_node(rnp);
1400 	if (!READ_ONCE(rcu_state.gp_flags)) {
1401 		/* Spurious wakeup, tell caller to go back to sleep.  */
1402 		raw_spin_unlock_irq_rcu_node(rnp);
1403 		return false;
1404 	}
1405 	WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1406 
1407 	if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1408 		/*
1409 		 * Grace period already in progress, don't start another.
1410 		 * Not supposed to be able to happen.
1411 		 */
1412 		raw_spin_unlock_irq_rcu_node(rnp);
1413 		return false;
1414 	}
1415 
1416 	/* Advance to a new grace period and initialize state. */
1417 	record_gp_stall_check_time();
1418 	/* Record GP times before starting GP, hence rcu_seq_start(). */
1419 	rcu_seq_start(&rcu_state.gp_seq);
1420 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1421 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1422 	rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1423 	raw_spin_unlock_irq_rcu_node(rnp);
1424 
1425 	/*
1426 	 * Apply per-leaf buffered online and offline operations to
1427 	 * the rcu_node tree. Note that this new grace period need not
1428 	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1429 	 * offlining path, when combined with checks in this function,
1430 	 * will handle CPUs that are currently going offline or that will
1431 	 * go offline later.  Please also refer to "Hotplug CPU" section
1432 	 * of RCU's Requirements documentation.
1433 	 */
1434 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1435 	/* Exclude CPU hotplug operations. */
1436 	rcu_for_each_leaf_node(rnp) {
1437 		local_irq_save(flags);
1438 		arch_spin_lock(&rcu_state.ofl_lock);
1439 		raw_spin_lock_rcu_node(rnp);
1440 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1441 		    !rnp->wait_blkd_tasks) {
1442 			/* Nothing to do on this leaf rcu_node structure. */
1443 			raw_spin_unlock_rcu_node(rnp);
1444 			arch_spin_unlock(&rcu_state.ofl_lock);
1445 			local_irq_restore(flags);
1446 			continue;
1447 		}
1448 
1449 		/* Record old state, apply changes to ->qsmaskinit field. */
1450 		oldmask = rnp->qsmaskinit;
1451 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1452 
1453 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1454 		if (!oldmask != !rnp->qsmaskinit) {
1455 			if (!oldmask) { /* First online CPU for rcu_node. */
1456 				if (!rnp->wait_blkd_tasks) /* Ever offline? */
1457 					rcu_init_new_rnp(rnp);
1458 			} else if (rcu_preempt_has_tasks(rnp)) {
1459 				rnp->wait_blkd_tasks = true; /* blocked tasks */
1460 			} else { /* Last offline CPU and can propagate. */
1461 				rcu_cleanup_dead_rnp(rnp);
1462 			}
1463 		}
1464 
1465 		/*
1466 		 * If all waited-on tasks from prior grace period are
1467 		 * done, and if all this rcu_node structure's CPUs are
1468 		 * still offline, propagate up the rcu_node tree and
1469 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1470 		 * rcu_node structure's CPUs has since come back online,
1471 		 * simply clear ->wait_blkd_tasks.
1472 		 */
1473 		if (rnp->wait_blkd_tasks &&
1474 		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1475 			rnp->wait_blkd_tasks = false;
1476 			if (!rnp->qsmaskinit)
1477 				rcu_cleanup_dead_rnp(rnp);
1478 		}
1479 
1480 		raw_spin_unlock_rcu_node(rnp);
1481 		arch_spin_unlock(&rcu_state.ofl_lock);
1482 		local_irq_restore(flags);
1483 	}
1484 	rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1485 
1486 	/*
1487 	 * Set the quiescent-state-needed bits in all the rcu_node
1488 	 * structures for all currently online CPUs in breadth-first
1489 	 * order, starting from the root rcu_node structure, relying on the
1490 	 * layout of the tree within the rcu_state.node[] array.  Note that
1491 	 * other CPUs will access only the leaves of the hierarchy, thus
1492 	 * seeing that no grace period is in progress, at least until the
1493 	 * corresponding leaf node has been initialized.
1494 	 *
1495 	 * The grace period cannot complete until the initialization
1496 	 * process finishes, because this kthread handles both.
1497 	 */
1498 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1499 	rcu_for_each_node_breadth_first(rnp) {
1500 		rcu_gp_slow(gp_init_delay);
1501 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1502 		rdp = this_cpu_ptr(&rcu_data);
1503 		rcu_preempt_check_blocked_tasks(rnp);
1504 		rnp->qsmask = rnp->qsmaskinit;
1505 		WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1506 		if (rnp == rdp->mynode)
1507 			(void)__note_gp_changes(rnp, rdp);
1508 		rcu_preempt_boost_start_gp(rnp);
1509 		trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1510 					    rnp->level, rnp->grplo,
1511 					    rnp->grphi, rnp->qsmask);
1512 		/* Quiescent states for tasks on any now-offline CPUs. */
1513 		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1514 		rnp->rcu_gp_init_mask = mask;
1515 		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1516 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1517 		else
1518 			raw_spin_unlock_irq_rcu_node(rnp);
1519 		cond_resched_tasks_rcu_qs();
1520 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1521 	}
1522 
1523 	// If strict, make all CPUs aware of new grace period.
1524 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1525 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1526 
1527 	return true;
1528 }
1529 
1530 /*
1531  * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1532  * time.
1533  */
1534 static bool rcu_gp_fqs_check_wake(int *gfp)
1535 {
1536 	struct rcu_node *rnp = rcu_get_root();
1537 
1538 	// If under overload conditions, force an immediate FQS scan.
1539 	if (*gfp & RCU_GP_FLAG_OVLD)
1540 		return true;
1541 
1542 	// Someone like call_rcu() requested a force-quiescent-state scan.
1543 	*gfp = READ_ONCE(rcu_state.gp_flags);
1544 	if (*gfp & RCU_GP_FLAG_FQS)
1545 		return true;
1546 
1547 	// The current grace period has completed.
1548 	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1549 		return true;
1550 
1551 	return false;
1552 }
1553 
1554 /*
1555  * Do one round of quiescent-state forcing.
1556  */
1557 static void rcu_gp_fqs(bool first_time)
1558 {
1559 	struct rcu_node *rnp = rcu_get_root();
1560 
1561 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1562 	WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1563 	if (first_time) {
1564 		/* Collect dyntick-idle snapshots. */
1565 		force_qs_rnp(dyntick_save_progress_counter);
1566 	} else {
1567 		/* Handle dyntick-idle and offline CPUs. */
1568 		force_qs_rnp(rcu_implicit_dynticks_qs);
1569 	}
1570 	/* Clear flag to prevent immediate re-entry. */
1571 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1572 		raw_spin_lock_irq_rcu_node(rnp);
1573 		WRITE_ONCE(rcu_state.gp_flags,
1574 			   READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1575 		raw_spin_unlock_irq_rcu_node(rnp);
1576 	}
1577 }
1578 
1579 /*
1580  * Loop doing repeated quiescent-state forcing until the grace period ends.
1581  */
1582 static noinline_for_stack void rcu_gp_fqs_loop(void)
1583 {
1584 	bool first_gp_fqs = true;
1585 	int gf = 0;
1586 	unsigned long j;
1587 	int ret;
1588 	struct rcu_node *rnp = rcu_get_root();
1589 
1590 	j = READ_ONCE(jiffies_till_first_fqs);
1591 	if (rcu_state.cbovld)
1592 		gf = RCU_GP_FLAG_OVLD;
1593 	ret = 0;
1594 	for (;;) {
1595 		if (rcu_state.cbovld) {
1596 			j = (j + 2) / 3;
1597 			if (j <= 0)
1598 				j = 1;
1599 		}
1600 		if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1601 			WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1602 			/*
1603 			 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1604 			 * update; required for stall checks.
1605 			 */
1606 			smp_wmb();
1607 			WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1608 				   jiffies + (j ? 3 * j : 2));
1609 		}
1610 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1611 				       TPS("fqswait"));
1612 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1613 		(void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1614 				 rcu_gp_fqs_check_wake(&gf), j);
1615 		rcu_gp_torture_wait();
1616 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1617 		/* Locking provides needed memory barriers. */
1618 		/*
1619 		 * Exit the loop if the root rcu_node structure indicates that the grace period
1620 		 * has ended, leave the loop.  The rcu_preempt_blocked_readers_cgp(rnp) check
1621 		 * is required only for single-node rcu_node trees because readers blocking
1622 		 * the current grace period are queued only on leaf rcu_node structures.
1623 		 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1624 		 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1625 		 * the corresponding leaf nodes have passed through their quiescent state.
1626 		 */
1627 		if (!READ_ONCE(rnp->qsmask) &&
1628 		    !rcu_preempt_blocked_readers_cgp(rnp))
1629 			break;
1630 		/* If time for quiescent-state forcing, do it. */
1631 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1632 		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1633 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1634 					       TPS("fqsstart"));
1635 			rcu_gp_fqs(first_gp_fqs);
1636 			gf = 0;
1637 			if (first_gp_fqs) {
1638 				first_gp_fqs = false;
1639 				gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1640 			}
1641 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1642 					       TPS("fqsend"));
1643 			cond_resched_tasks_rcu_qs();
1644 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1645 			ret = 0; /* Force full wait till next FQS. */
1646 			j = READ_ONCE(jiffies_till_next_fqs);
1647 		} else {
1648 			/* Deal with stray signal. */
1649 			cond_resched_tasks_rcu_qs();
1650 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1651 			WARN_ON(signal_pending(current));
1652 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1653 					       TPS("fqswaitsig"));
1654 			ret = 1; /* Keep old FQS timing. */
1655 			j = jiffies;
1656 			if (time_after(jiffies, rcu_state.jiffies_force_qs))
1657 				j = 1;
1658 			else
1659 				j = rcu_state.jiffies_force_qs - j;
1660 			gf = 0;
1661 		}
1662 	}
1663 }
1664 
1665 /*
1666  * Clean up after the old grace period.
1667  */
1668 static noinline void rcu_gp_cleanup(void)
1669 {
1670 	int cpu;
1671 	bool needgp = false;
1672 	unsigned long gp_duration;
1673 	unsigned long new_gp_seq;
1674 	bool offloaded;
1675 	struct rcu_data *rdp;
1676 	struct rcu_node *rnp = rcu_get_root();
1677 	struct swait_queue_head *sq;
1678 
1679 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
1680 	raw_spin_lock_irq_rcu_node(rnp);
1681 	rcu_state.gp_end = jiffies;
1682 	gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1683 	if (gp_duration > rcu_state.gp_max)
1684 		rcu_state.gp_max = gp_duration;
1685 
1686 	/*
1687 	 * We know the grace period is complete, but to everyone else
1688 	 * it appears to still be ongoing.  But it is also the case
1689 	 * that to everyone else it looks like there is nothing that
1690 	 * they can do to advance the grace period.  It is therefore
1691 	 * safe for us to drop the lock in order to mark the grace
1692 	 * period as completed in all of the rcu_node structures.
1693 	 */
1694 	rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1695 	raw_spin_unlock_irq_rcu_node(rnp);
1696 
1697 	/*
1698 	 * Propagate new ->gp_seq value to rcu_node structures so that
1699 	 * other CPUs don't have to wait until the start of the next grace
1700 	 * period to process their callbacks.  This also avoids some nasty
1701 	 * RCU grace-period initialization races by forcing the end of
1702 	 * the current grace period to be completely recorded in all of
1703 	 * the rcu_node structures before the beginning of the next grace
1704 	 * period is recorded in any of the rcu_node structures.
1705 	 */
1706 	new_gp_seq = rcu_state.gp_seq;
1707 	rcu_seq_end(&new_gp_seq);
1708 	rcu_for_each_node_breadth_first(rnp) {
1709 		raw_spin_lock_irq_rcu_node(rnp);
1710 		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1711 			dump_blkd_tasks(rnp, 10);
1712 		WARN_ON_ONCE(rnp->qsmask);
1713 		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1714 		if (!rnp->parent)
1715 			smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1716 		rdp = this_cpu_ptr(&rcu_data);
1717 		if (rnp == rdp->mynode)
1718 			needgp = __note_gp_changes(rnp, rdp) || needgp;
1719 		/* smp_mb() provided by prior unlock-lock pair. */
1720 		needgp = rcu_future_gp_cleanup(rnp) || needgp;
1721 		// Reset overload indication for CPUs no longer overloaded
1722 		if (rcu_is_leaf_node(rnp))
1723 			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1724 				rdp = per_cpu_ptr(&rcu_data, cpu);
1725 				check_cb_ovld_locked(rdp, rnp);
1726 			}
1727 		sq = rcu_nocb_gp_get(rnp);
1728 		raw_spin_unlock_irq_rcu_node(rnp);
1729 		rcu_nocb_gp_cleanup(sq);
1730 		cond_resched_tasks_rcu_qs();
1731 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
1732 		rcu_gp_slow(gp_cleanup_delay);
1733 	}
1734 	rnp = rcu_get_root();
1735 	raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1736 
1737 	/* Declare grace period done, trace first to use old GP number. */
1738 	trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1739 	rcu_seq_end(&rcu_state.gp_seq);
1740 	ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1741 	WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1742 	/* Check for GP requests since above loop. */
1743 	rdp = this_cpu_ptr(&rcu_data);
1744 	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1745 		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1746 				  TPS("CleanupMore"));
1747 		needgp = true;
1748 	}
1749 	/* Advance CBs to reduce false positives below. */
1750 	offloaded = rcu_rdp_is_offloaded(rdp);
1751 	if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1752 
1753 		// We get here if a grace period was needed (“needgp”)
1754 		// and the above call to rcu_accelerate_cbs() did not set
1755 		// the RCU_GP_FLAG_INIT bit in ->gp_state (which records
1756 		// the need for another grace period).  The purpose
1757 		// of the “offloaded” check is to avoid invoking
1758 		// rcu_accelerate_cbs() on an offloaded CPU because we do not
1759 		// hold the ->nocb_lock needed to safely access an offloaded
1760 		// ->cblist.  We do not want to acquire that lock because
1761 		// it can be heavily contended during callback floods.
1762 
1763 		WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1764 		WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1765 		trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1766 	} else {
1767 
1768 		// We get here either if there is no need for an
1769 		// additional grace period or if rcu_accelerate_cbs() has
1770 		// already set the RCU_GP_FLAG_INIT bit in ->gp_flags. 
1771 		// So all we need to do is to clear all of the other
1772 		// ->gp_flags bits.
1773 
1774 		WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1775 	}
1776 	raw_spin_unlock_irq_rcu_node(rnp);
1777 
1778 	// If strict, make all CPUs aware of the end of the old grace period.
1779 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1780 		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1781 }
1782 
1783 /*
1784  * Body of kthread that handles grace periods.
1785  */
1786 static int __noreturn rcu_gp_kthread(void *unused)
1787 {
1788 	rcu_bind_gp_kthread();
1789 	for (;;) {
1790 
1791 		/* Handle grace-period start. */
1792 		for (;;) {
1793 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1794 					       TPS("reqwait"));
1795 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1796 			swait_event_idle_exclusive(rcu_state.gp_wq,
1797 					 READ_ONCE(rcu_state.gp_flags) &
1798 					 RCU_GP_FLAG_INIT);
1799 			rcu_gp_torture_wait();
1800 			WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1801 			/* Locking provides needed memory barrier. */
1802 			if (rcu_gp_init())
1803 				break;
1804 			cond_resched_tasks_rcu_qs();
1805 			WRITE_ONCE(rcu_state.gp_activity, jiffies);
1806 			WARN_ON(signal_pending(current));
1807 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1808 					       TPS("reqwaitsig"));
1809 		}
1810 
1811 		/* Handle quiescent-state forcing. */
1812 		rcu_gp_fqs_loop();
1813 
1814 		/* Handle grace-period end. */
1815 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1816 		rcu_gp_cleanup();
1817 		WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1818 	}
1819 }
1820 
1821 /*
1822  * Report a full set of quiescent states to the rcu_state data structure.
1823  * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1824  * another grace period is required.  Whether we wake the grace-period
1825  * kthread or it awakens itself for the next round of quiescent-state
1826  * forcing, that kthread will clean up after the just-completed grace
1827  * period.  Note that the caller must hold rnp->lock, which is released
1828  * before return.
1829  */
1830 static void rcu_report_qs_rsp(unsigned long flags)
1831 	__releases(rcu_get_root()->lock)
1832 {
1833 	raw_lockdep_assert_held_rcu_node(rcu_get_root());
1834 	WARN_ON_ONCE(!rcu_gp_in_progress());
1835 	WRITE_ONCE(rcu_state.gp_flags,
1836 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1837 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1838 	rcu_gp_kthread_wake();
1839 }
1840 
1841 /*
1842  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1843  * Allows quiescent states for a group of CPUs to be reported at one go
1844  * to the specified rcu_node structure, though all the CPUs in the group
1845  * must be represented by the same rcu_node structure (which need not be a
1846  * leaf rcu_node structure, though it often will be).  The gps parameter
1847  * is the grace-period snapshot, which means that the quiescent states
1848  * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
1849  * must be held upon entry, and it is released before return.
1850  *
1851  * As a special case, if mask is zero, the bit-already-cleared check is
1852  * disabled.  This allows propagating quiescent state due to resumed tasks
1853  * during grace-period initialization.
1854  */
1855 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1856 			      unsigned long gps, unsigned long flags)
1857 	__releases(rnp->lock)
1858 {
1859 	unsigned long oldmask = 0;
1860 	struct rcu_node *rnp_c;
1861 
1862 	raw_lockdep_assert_held_rcu_node(rnp);
1863 
1864 	/* Walk up the rcu_node hierarchy. */
1865 	for (;;) {
1866 		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1867 
1868 			/*
1869 			 * Our bit has already been cleared, or the
1870 			 * relevant grace period is already over, so done.
1871 			 */
1872 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1873 			return;
1874 		}
1875 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1876 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1877 			     rcu_preempt_blocked_readers_cgp(rnp));
1878 		WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1879 		trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1880 						 mask, rnp->qsmask, rnp->level,
1881 						 rnp->grplo, rnp->grphi,
1882 						 !!rnp->gp_tasks);
1883 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1884 
1885 			/* Other bits still set at this level, so done. */
1886 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1887 			return;
1888 		}
1889 		rnp->completedqs = rnp->gp_seq;
1890 		mask = rnp->grpmask;
1891 		if (rnp->parent == NULL) {
1892 
1893 			/* No more levels.  Exit loop holding root lock. */
1894 
1895 			break;
1896 		}
1897 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1898 		rnp_c = rnp;
1899 		rnp = rnp->parent;
1900 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
1901 		oldmask = READ_ONCE(rnp_c->qsmask);
1902 	}
1903 
1904 	/*
1905 	 * Get here if we are the last CPU to pass through a quiescent
1906 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1907 	 * to clean up and start the next grace period if one is needed.
1908 	 */
1909 	rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1910 }
1911 
1912 /*
1913  * Record a quiescent state for all tasks that were previously queued
1914  * on the specified rcu_node structure and that were blocking the current
1915  * RCU grace period.  The caller must hold the corresponding rnp->lock with
1916  * irqs disabled, and this lock is released upon return, but irqs remain
1917  * disabled.
1918  */
1919 static void __maybe_unused
1920 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1921 	__releases(rnp->lock)
1922 {
1923 	unsigned long gps;
1924 	unsigned long mask;
1925 	struct rcu_node *rnp_p;
1926 
1927 	raw_lockdep_assert_held_rcu_node(rnp);
1928 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1929 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1930 	    rnp->qsmask != 0) {
1931 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1932 		return;  /* Still need more quiescent states! */
1933 	}
1934 
1935 	rnp->completedqs = rnp->gp_seq;
1936 	rnp_p = rnp->parent;
1937 	if (rnp_p == NULL) {
1938 		/*
1939 		 * Only one rcu_node structure in the tree, so don't
1940 		 * try to report up to its nonexistent parent!
1941 		 */
1942 		rcu_report_qs_rsp(flags);
1943 		return;
1944 	}
1945 
1946 	/* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1947 	gps = rnp->gp_seq;
1948 	mask = rnp->grpmask;
1949 	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
1950 	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
1951 	rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1952 }
1953 
1954 /*
1955  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1956  * structure.  This must be called from the specified CPU.
1957  */
1958 static void
1959 rcu_report_qs_rdp(struct rcu_data *rdp)
1960 {
1961 	unsigned long flags;
1962 	unsigned long mask;
1963 	bool needacc = false;
1964 	struct rcu_node *rnp;
1965 
1966 	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
1967 	rnp = rdp->mynode;
1968 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1969 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
1970 	    rdp->gpwrap) {
1971 
1972 		/*
1973 		 * The grace period in which this quiescent state was
1974 		 * recorded has ended, so don't report it upwards.
1975 		 * We will instead need a new quiescent state that lies
1976 		 * within the current grace period.
1977 		 */
1978 		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
1979 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1980 		return;
1981 	}
1982 	mask = rdp->grpmask;
1983 	rdp->core_needs_qs = false;
1984 	if ((rnp->qsmask & mask) == 0) {
1985 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1986 	} else {
1987 		/*
1988 		 * This GP can't end until cpu checks in, so all of our
1989 		 * callbacks can be processed during the next GP.
1990 		 *
1991 		 * NOCB kthreads have their own way to deal with that...
1992 		 */
1993 		if (!rcu_rdp_is_offloaded(rdp)) {
1994 			/*
1995 			 * The current GP has not yet ended, so it
1996 			 * should not be possible for rcu_accelerate_cbs()
1997 			 * to return true.  So complain, but don't awaken.
1998 			 */
1999 			WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2000 		} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2001 			/*
2002 			 * ...but NOCB kthreads may miss or delay callbacks acceleration
2003 			 * if in the middle of a (de-)offloading process.
2004 			 */
2005 			needacc = true;
2006 		}
2007 
2008 		rcu_disable_urgency_upon_qs(rdp);
2009 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2010 		/* ^^^ Released rnp->lock */
2011 
2012 		if (needacc) {
2013 			rcu_nocb_lock_irqsave(rdp, flags);
2014 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2015 			rcu_nocb_unlock_irqrestore(rdp, flags);
2016 		}
2017 	}
2018 }
2019 
2020 /*
2021  * Check to see if there is a new grace period of which this CPU
2022  * is not yet aware, and if so, set up local rcu_data state for it.
2023  * Otherwise, see if this CPU has just passed through its first
2024  * quiescent state for this grace period, and record that fact if so.
2025  */
2026 static void
2027 rcu_check_quiescent_state(struct rcu_data *rdp)
2028 {
2029 	/* Check for grace-period ends and beginnings. */
2030 	note_gp_changes(rdp);
2031 
2032 	/*
2033 	 * Does this CPU still need to do its part for current grace period?
2034 	 * If no, return and let the other CPUs do their part as well.
2035 	 */
2036 	if (!rdp->core_needs_qs)
2037 		return;
2038 
2039 	/*
2040 	 * Was there a quiescent state since the beginning of the grace
2041 	 * period? If no, then exit and wait for the next call.
2042 	 */
2043 	if (rdp->cpu_no_qs.b.norm)
2044 		return;
2045 
2046 	/*
2047 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2048 	 * judge of that).
2049 	 */
2050 	rcu_report_qs_rdp(rdp);
2051 }
2052 
2053 /* Return true if callback-invocation time limit exceeded. */
2054 static bool rcu_do_batch_check_time(long count, long tlimit,
2055 				    bool jlimit_check, unsigned long jlimit)
2056 {
2057 	// Invoke local_clock() only once per 32 consecutive callbacks.
2058 	return unlikely(tlimit) &&
2059 	       (!likely(count & 31) ||
2060 		(IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2061 		 jlimit_check && time_after(jiffies, jlimit))) &&
2062 	       local_clock() >= tlimit;
2063 }
2064 
2065 /*
2066  * Invoke any RCU callbacks that have made it to the end of their grace
2067  * period.  Throttle as specified by rdp->blimit.
2068  */
2069 static void rcu_do_batch(struct rcu_data *rdp)
2070 {
2071 	long bl;
2072 	long count = 0;
2073 	int div;
2074 	bool __maybe_unused empty;
2075 	unsigned long flags;
2076 	unsigned long jlimit;
2077 	bool jlimit_check = false;
2078 	long pending;
2079 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2080 	struct rcu_head *rhp;
2081 	long tlimit = 0;
2082 
2083 	/* If no callbacks are ready, just return. */
2084 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2085 		trace_rcu_batch_start(rcu_state.name,
2086 				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2087 		trace_rcu_batch_end(rcu_state.name, 0,
2088 				    !rcu_segcblist_empty(&rdp->cblist),
2089 				    need_resched(), is_idle_task(current),
2090 				    rcu_is_callbacks_kthread(rdp));
2091 		return;
2092 	}
2093 
2094 	/*
2095 	 * Extract the list of ready callbacks, disabling IRQs to prevent
2096 	 * races with call_rcu() from interrupt handlers.  Leave the
2097 	 * callback counts, as rcu_barrier() needs to be conservative.
2098 	 */
2099 	rcu_nocb_lock_irqsave(rdp, flags);
2100 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2101 	pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2102 	div = READ_ONCE(rcu_divisor);
2103 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2104 	bl = max(rdp->blimit, pending >> div);
2105 	if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2106 	    (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2107 		const long npj = NSEC_PER_SEC / HZ;
2108 		long rrn = READ_ONCE(rcu_resched_ns);
2109 
2110 		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2111 		tlimit = local_clock() + rrn;
2112 		jlimit = jiffies + (rrn + npj + 1) / npj;
2113 		jlimit_check = true;
2114 	}
2115 	trace_rcu_batch_start(rcu_state.name,
2116 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
2117 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2118 	if (rcu_rdp_is_offloaded(rdp))
2119 		rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2120 
2121 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2122 	rcu_nocb_unlock_irqrestore(rdp, flags);
2123 
2124 	/* Invoke callbacks. */
2125 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2126 	rhp = rcu_cblist_dequeue(&rcl);
2127 
2128 	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2129 		rcu_callback_t f;
2130 
2131 		count++;
2132 		debug_rcu_head_unqueue(rhp);
2133 
2134 		rcu_lock_acquire(&rcu_callback_map);
2135 		trace_rcu_invoke_callback(rcu_state.name, rhp);
2136 
2137 		f = rhp->func;
2138 		WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2139 		f(rhp);
2140 
2141 		rcu_lock_release(&rcu_callback_map);
2142 
2143 		/*
2144 		 * Stop only if limit reached and CPU has something to do.
2145 		 */
2146 		if (in_serving_softirq()) {
2147 			if (count >= bl && (need_resched() || !is_idle_task(current)))
2148 				break;
2149 			/*
2150 			 * Make sure we don't spend too much time here and deprive other
2151 			 * softirq vectors of CPU cycles.
2152 			 */
2153 			if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2154 				break;
2155 		} else {
2156 			// In rcuc/rcuoc context, so no worries about
2157 			// depriving other softirq vectors of CPU cycles.
2158 			local_bh_enable();
2159 			lockdep_assert_irqs_enabled();
2160 			cond_resched_tasks_rcu_qs();
2161 			lockdep_assert_irqs_enabled();
2162 			local_bh_disable();
2163 			// But rcuc kthreads can delay quiescent-state
2164 			// reporting, so check time limits for them.
2165 			if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2166 			    rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2167 				rdp->rcu_cpu_has_work = 1;
2168 				break;
2169 			}
2170 		}
2171 	}
2172 
2173 	rcu_nocb_lock_irqsave(rdp, flags);
2174 	rdp->n_cbs_invoked += count;
2175 	trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2176 			    is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2177 
2178 	/* Update counts and requeue any remaining callbacks. */
2179 	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2180 	rcu_segcblist_add_len(&rdp->cblist, -count);
2181 
2182 	/* Reinstate batch limit if we have worked down the excess. */
2183 	count = rcu_segcblist_n_cbs(&rdp->cblist);
2184 	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2185 		rdp->blimit = blimit;
2186 
2187 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2188 	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2189 		rdp->qlen_last_fqs_check = 0;
2190 		rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2191 	} else if (count < rdp->qlen_last_fqs_check - qhimark)
2192 		rdp->qlen_last_fqs_check = count;
2193 
2194 	/*
2195 	 * The following usually indicates a double call_rcu().  To track
2196 	 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2197 	 */
2198 	empty = rcu_segcblist_empty(&rdp->cblist);
2199 	WARN_ON_ONCE(count == 0 && !empty);
2200 	WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2201 		     count != 0 && empty);
2202 	WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2203 	WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2204 
2205 	rcu_nocb_unlock_irqrestore(rdp, flags);
2206 
2207 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2208 }
2209 
2210 /*
2211  * This function is invoked from each scheduling-clock interrupt,
2212  * and checks to see if this CPU is in a non-context-switch quiescent
2213  * state, for example, user mode or idle loop.  It also schedules RCU
2214  * core processing.  If the current grace period has gone on too long,
2215  * it will ask the scheduler to manufacture a context switch for the sole
2216  * purpose of providing the needed quiescent state.
2217  */
2218 void rcu_sched_clock_irq(int user)
2219 {
2220 	unsigned long j;
2221 
2222 	if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2223 		j = jiffies;
2224 		WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2225 		__this_cpu_write(rcu_data.last_sched_clock, j);
2226 	}
2227 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2228 	lockdep_assert_irqs_disabled();
2229 	raw_cpu_inc(rcu_data.ticks_this_gp);
2230 	/* The load-acquire pairs with the store-release setting to true. */
2231 	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2232 		/* Idle and userspace execution already are quiescent states. */
2233 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2234 			set_tsk_need_resched(current);
2235 			set_preempt_need_resched();
2236 		}
2237 		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
2238 	}
2239 	rcu_flavor_sched_clock_irq(user);
2240 	if (rcu_pending(user))
2241 		invoke_rcu_core();
2242 	if (user || rcu_is_cpu_rrupt_from_idle())
2243 		rcu_note_voluntary_context_switch(current);
2244 	lockdep_assert_irqs_disabled();
2245 
2246 	trace_rcu_utilization(TPS("End scheduler-tick"));
2247 }
2248 
2249 /*
2250  * Scan the leaf rcu_node structures.  For each structure on which all
2251  * CPUs have reported a quiescent state and on which there are tasks
2252  * blocking the current grace period, initiate RCU priority boosting.
2253  * Otherwise, invoke the specified function to check dyntick state for
2254  * each CPU that has not yet reported a quiescent state.
2255  */
2256 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2257 {
2258 	int cpu;
2259 	unsigned long flags;
2260 	unsigned long mask;
2261 	struct rcu_data *rdp;
2262 	struct rcu_node *rnp;
2263 
2264 	rcu_state.cbovld = rcu_state.cbovldnext;
2265 	rcu_state.cbovldnext = false;
2266 	rcu_for_each_leaf_node(rnp) {
2267 		cond_resched_tasks_rcu_qs();
2268 		mask = 0;
2269 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
2270 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
2271 		if (rnp->qsmask == 0) {
2272 			if (rcu_preempt_blocked_readers_cgp(rnp)) {
2273 				/*
2274 				 * No point in scanning bits because they
2275 				 * are all zero.  But we might need to
2276 				 * priority-boost blocked readers.
2277 				 */
2278 				rcu_initiate_boost(rnp, flags);
2279 				/* rcu_initiate_boost() releases rnp->lock */
2280 				continue;
2281 			}
2282 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2283 			continue;
2284 		}
2285 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2286 			rdp = per_cpu_ptr(&rcu_data, cpu);
2287 			if (f(rdp)) {
2288 				mask |= rdp->grpmask;
2289 				rcu_disable_urgency_upon_qs(rdp);
2290 			}
2291 		}
2292 		if (mask != 0) {
2293 			/* Idle/offline CPUs, report (releases rnp->lock). */
2294 			rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2295 		} else {
2296 			/* Nothing to do here, so just drop the lock. */
2297 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2298 		}
2299 	}
2300 }
2301 
2302 /*
2303  * Force quiescent states on reluctant CPUs, and also detect which
2304  * CPUs are in dyntick-idle mode.
2305  */
2306 void rcu_force_quiescent_state(void)
2307 {
2308 	unsigned long flags;
2309 	bool ret;
2310 	struct rcu_node *rnp;
2311 	struct rcu_node *rnp_old = NULL;
2312 
2313 	/* Funnel through hierarchy to reduce memory contention. */
2314 	rnp = raw_cpu_read(rcu_data.mynode);
2315 	for (; rnp != NULL; rnp = rnp->parent) {
2316 		ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2317 		       !raw_spin_trylock(&rnp->fqslock);
2318 		if (rnp_old != NULL)
2319 			raw_spin_unlock(&rnp_old->fqslock);
2320 		if (ret)
2321 			return;
2322 		rnp_old = rnp;
2323 	}
2324 	/* rnp_old == rcu_get_root(), rnp == NULL. */
2325 
2326 	/* Reached the root of the rcu_node tree, acquire lock. */
2327 	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2328 	raw_spin_unlock(&rnp_old->fqslock);
2329 	if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2330 		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2331 		return;  /* Someone beat us to it. */
2332 	}
2333 	WRITE_ONCE(rcu_state.gp_flags,
2334 		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2335 	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2336 	rcu_gp_kthread_wake();
2337 }
2338 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2339 
2340 // Workqueue handler for an RCU reader for kernels enforcing struct RCU
2341 // grace periods.
2342 static void strict_work_handler(struct work_struct *work)
2343 {
2344 	rcu_read_lock();
2345 	rcu_read_unlock();
2346 }
2347 
2348 /* Perform RCU core processing work for the current CPU.  */
2349 static __latent_entropy void rcu_core(void)
2350 {
2351 	unsigned long flags;
2352 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2353 	struct rcu_node *rnp = rdp->mynode;
2354 	/*
2355 	 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2356 	 * Therefore this function can race with concurrent NOCB (de-)offloading
2357 	 * on this CPU and the below condition must be considered volatile.
2358 	 * However if we race with:
2359 	 *
2360 	 * _ Offloading:   In the worst case we accelerate or process callbacks
2361 	 *                 concurrently with NOCB kthreads. We are guaranteed to
2362 	 *                 call rcu_nocb_lock() if that happens.
2363 	 *
2364 	 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2365 	 *                 processing. This is fine because the early stage
2366 	 *                 of deoffloading invokes rcu_core() after setting
2367 	 *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2368 	 *                 what could have been dismissed without the need to wait
2369 	 *                 for the next rcu_pending() check in the next jiffy.
2370 	 */
2371 	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2372 
2373 	if (cpu_is_offline(smp_processor_id()))
2374 		return;
2375 	trace_rcu_utilization(TPS("Start RCU core"));
2376 	WARN_ON_ONCE(!rdp->beenonline);
2377 
2378 	/* Report any deferred quiescent states if preemption enabled. */
2379 	if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2380 		rcu_preempt_deferred_qs(current);
2381 	} else if (rcu_preempt_need_deferred_qs(current)) {
2382 		set_tsk_need_resched(current);
2383 		set_preempt_need_resched();
2384 	}
2385 
2386 	/* Update RCU state based on any recent quiescent states. */
2387 	rcu_check_quiescent_state(rdp);
2388 
2389 	/* No grace period and unregistered callbacks? */
2390 	if (!rcu_gp_in_progress() &&
2391 	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2392 		rcu_nocb_lock_irqsave(rdp, flags);
2393 		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2394 			rcu_accelerate_cbs_unlocked(rnp, rdp);
2395 		rcu_nocb_unlock_irqrestore(rdp, flags);
2396 	}
2397 
2398 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2399 
2400 	/* If there are callbacks ready, invoke them. */
2401 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2402 	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
2403 		rcu_do_batch(rdp);
2404 		/* Re-invoke RCU core processing if there are callbacks remaining. */
2405 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
2406 			invoke_rcu_core();
2407 	}
2408 
2409 	/* Do any needed deferred wakeups of rcuo kthreads. */
2410 	do_nocb_deferred_wakeup(rdp);
2411 	trace_rcu_utilization(TPS("End RCU core"));
2412 
2413 	// If strict GPs, schedule an RCU reader in a clean environment.
2414 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2415 		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2416 }
2417 
2418 static void rcu_core_si(struct softirq_action *h)
2419 {
2420 	rcu_core();
2421 }
2422 
2423 static void rcu_wake_cond(struct task_struct *t, int status)
2424 {
2425 	/*
2426 	 * If the thread is yielding, only wake it when this
2427 	 * is invoked from idle
2428 	 */
2429 	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2430 		wake_up_process(t);
2431 }
2432 
2433 static void invoke_rcu_core_kthread(void)
2434 {
2435 	struct task_struct *t;
2436 	unsigned long flags;
2437 
2438 	local_irq_save(flags);
2439 	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2440 	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2441 	if (t != NULL && t != current)
2442 		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2443 	local_irq_restore(flags);
2444 }
2445 
2446 /*
2447  * Wake up this CPU's rcuc kthread to do RCU core processing.
2448  */
2449 static void invoke_rcu_core(void)
2450 {
2451 	if (!cpu_online(smp_processor_id()))
2452 		return;
2453 	if (use_softirq)
2454 		raise_softirq(RCU_SOFTIRQ);
2455 	else
2456 		invoke_rcu_core_kthread();
2457 }
2458 
2459 static void rcu_cpu_kthread_park(unsigned int cpu)
2460 {
2461 	per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2462 }
2463 
2464 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2465 {
2466 	return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2467 }
2468 
2469 /*
2470  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2471  * the RCU softirq used in configurations of RCU that do not support RCU
2472  * priority boosting.
2473  */
2474 static void rcu_cpu_kthread(unsigned int cpu)
2475 {
2476 	unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2477 	char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2478 	unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2479 	int spincnt;
2480 
2481 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2482 	for (spincnt = 0; spincnt < 10; spincnt++) {
2483 		WRITE_ONCE(*j, jiffies);
2484 		local_bh_disable();
2485 		*statusp = RCU_KTHREAD_RUNNING;
2486 		local_irq_disable();
2487 		work = *workp;
2488 		WRITE_ONCE(*workp, 0);
2489 		local_irq_enable();
2490 		if (work)
2491 			rcu_core();
2492 		local_bh_enable();
2493 		if (!READ_ONCE(*workp)) {
2494 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2495 			*statusp = RCU_KTHREAD_WAITING;
2496 			return;
2497 		}
2498 	}
2499 	*statusp = RCU_KTHREAD_YIELDING;
2500 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2501 	schedule_timeout_idle(2);
2502 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2503 	*statusp = RCU_KTHREAD_WAITING;
2504 	WRITE_ONCE(*j, jiffies);
2505 }
2506 
2507 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2508 	.store			= &rcu_data.rcu_cpu_kthread_task,
2509 	.thread_should_run	= rcu_cpu_kthread_should_run,
2510 	.thread_fn		= rcu_cpu_kthread,
2511 	.thread_comm		= "rcuc/%u",
2512 	.setup			= rcu_cpu_kthread_setup,
2513 	.park			= rcu_cpu_kthread_park,
2514 };
2515 
2516 /*
2517  * Spawn per-CPU RCU core processing kthreads.
2518  */
2519 static int __init rcu_spawn_core_kthreads(void)
2520 {
2521 	int cpu;
2522 
2523 	for_each_possible_cpu(cpu)
2524 		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2525 	if (use_softirq)
2526 		return 0;
2527 	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2528 		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2529 	return 0;
2530 }
2531 
2532 /*
2533  * Handle any core-RCU processing required by a call_rcu() invocation.
2534  */
2535 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2536 			    unsigned long flags)
2537 {
2538 	/*
2539 	 * If called from an extended quiescent state, invoke the RCU
2540 	 * core in order to force a re-evaluation of RCU's idleness.
2541 	 */
2542 	if (!rcu_is_watching())
2543 		invoke_rcu_core();
2544 
2545 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2546 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2547 		return;
2548 
2549 	/*
2550 	 * Force the grace period if too many callbacks or too long waiting.
2551 	 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2552 	 * if some other CPU has recently done so.  Also, don't bother
2553 	 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2554 	 * is the only one waiting for a grace period to complete.
2555 	 */
2556 	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2557 		     rdp->qlen_last_fqs_check + qhimark)) {
2558 
2559 		/* Are we ignoring a completed grace period? */
2560 		note_gp_changes(rdp);
2561 
2562 		/* Start a new grace period if one not already started. */
2563 		if (!rcu_gp_in_progress()) {
2564 			rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2565 		} else {
2566 			/* Give the grace period a kick. */
2567 			rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2568 			if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2569 			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2570 				rcu_force_quiescent_state();
2571 			rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2572 			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2573 		}
2574 	}
2575 }
2576 
2577 /*
2578  * RCU callback function to leak a callback.
2579  */
2580 static void rcu_leak_callback(struct rcu_head *rhp)
2581 {
2582 }
2583 
2584 /*
2585  * Check and if necessary update the leaf rcu_node structure's
2586  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2587  * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2588  * structure's ->lock.
2589  */
2590 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2591 {
2592 	raw_lockdep_assert_held_rcu_node(rnp);
2593 	if (qovld_calc <= 0)
2594 		return; // Early boot and wildcard value set.
2595 	if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2596 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2597 	else
2598 		WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2599 }
2600 
2601 /*
2602  * Check and if necessary update the leaf rcu_node structure's
2603  * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2604  * number of queued RCU callbacks.  No locks need be held, but the
2605  * caller must have disabled interrupts.
2606  *
2607  * Note that this function ignores the possibility that there are a lot
2608  * of callbacks all of which have already seen the end of their respective
2609  * grace periods.  This omission is due to the need for no-CBs CPUs to
2610  * be holding ->nocb_lock to do this check, which is too heavy for a
2611  * common-case operation.
2612  */
2613 static void check_cb_ovld(struct rcu_data *rdp)
2614 {
2615 	struct rcu_node *const rnp = rdp->mynode;
2616 
2617 	if (qovld_calc <= 0 ||
2618 	    ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2619 	     !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2620 		return; // Early boot wildcard value or already set correctly.
2621 	raw_spin_lock_rcu_node(rnp);
2622 	check_cb_ovld_locked(rdp, rnp);
2623 	raw_spin_unlock_rcu_node(rnp);
2624 }
2625 
2626 static void
2627 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
2628 {
2629 	static atomic_t doublefrees;
2630 	unsigned long flags;
2631 	bool lazy;
2632 	struct rcu_data *rdp;
2633 	bool was_alldone;
2634 
2635 	/* Misaligned rcu_head! */
2636 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2637 
2638 	if (debug_rcu_head_queue(head)) {
2639 		/*
2640 		 * Probable double call_rcu(), so leak the callback.
2641 		 * Use rcu:rcu_callback trace event to find the previous
2642 		 * time callback was passed to call_rcu().
2643 		 */
2644 		if (atomic_inc_return(&doublefrees) < 4) {
2645 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
2646 			mem_dump_obj(head);
2647 		}
2648 		WRITE_ONCE(head->func, rcu_leak_callback);
2649 		return;
2650 	}
2651 	head->func = func;
2652 	head->next = NULL;
2653 	kasan_record_aux_stack_noalloc(head);
2654 	local_irq_save(flags);
2655 	rdp = this_cpu_ptr(&rcu_data);
2656 	lazy = lazy_in && !rcu_async_should_hurry();
2657 
2658 	/* Add the callback to our list. */
2659 	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2660 		// This can trigger due to call_rcu() from offline CPU:
2661 		WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2662 		WARN_ON_ONCE(!rcu_is_watching());
2663 		// Very early boot, before rcu_init().  Initialize if needed
2664 		// and then drop through to queue the callback.
2665 		if (rcu_segcblist_empty(&rdp->cblist))
2666 			rcu_segcblist_init(&rdp->cblist);
2667 	}
2668 
2669 	check_cb_ovld(rdp);
2670 	if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2671 		return; // Enqueued onto ->nocb_bypass, so just leave.
2672 	// If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2673 	rcu_segcblist_enqueue(&rdp->cblist, head);
2674 	if (__is_kvfree_rcu_offset((unsigned long)func))
2675 		trace_rcu_kvfree_callback(rcu_state.name, head,
2676 					 (unsigned long)func,
2677 					 rcu_segcblist_n_cbs(&rdp->cblist));
2678 	else
2679 		trace_rcu_callback(rcu_state.name, head,
2680 				   rcu_segcblist_n_cbs(&rdp->cblist));
2681 
2682 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2683 
2684 	/* Go handle any RCU core processing required. */
2685 	if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2686 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2687 	} else {
2688 		__call_rcu_core(rdp, head, flags);
2689 		local_irq_restore(flags);
2690 	}
2691 }
2692 
2693 #ifdef CONFIG_RCU_LAZY
2694 /**
2695  * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2696  * flush all lazy callbacks (including the new one) to the main ->cblist while
2697  * doing so.
2698  *
2699  * @head: structure to be used for queueing the RCU updates.
2700  * @func: actual callback function to be invoked after the grace period
2701  *
2702  * The callback function will be invoked some time after a full grace
2703  * period elapses, in other words after all pre-existing RCU read-side
2704  * critical sections have completed.
2705  *
2706  * Use this API instead of call_rcu() if you don't want the callback to be
2707  * invoked after very long periods of time, which can happen on systems without
2708  * memory pressure and on systems which are lightly loaded or mostly idle.
2709  * This function will cause callbacks to be invoked sooner than later at the
2710  * expense of extra power. Other than that, this function is identical to, and
2711  * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
2712  * ordering and other functionality.
2713  */
2714 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2715 {
2716 	return __call_rcu_common(head, func, false);
2717 }
2718 EXPORT_SYMBOL_GPL(call_rcu_hurry);
2719 #endif
2720 
2721 /**
2722  * call_rcu() - Queue an RCU callback for invocation after a grace period.
2723  * By default the callbacks are 'lazy' and are kept hidden from the main
2724  * ->cblist to prevent starting of grace periods too soon.
2725  * If you desire grace periods to start very soon, use call_rcu_hurry().
2726  *
2727  * @head: structure to be used for queueing the RCU updates.
2728  * @func: actual callback function to be invoked after the grace period
2729  *
2730  * The callback function will be invoked some time after a full grace
2731  * period elapses, in other words after all pre-existing RCU read-side
2732  * critical sections have completed.  However, the callback function
2733  * might well execute concurrently with RCU read-side critical sections
2734  * that started after call_rcu() was invoked.
2735  *
2736  * RCU read-side critical sections are delimited by rcu_read_lock()
2737  * and rcu_read_unlock(), and may be nested.  In addition, but only in
2738  * v5.0 and later, regions of code across which interrupts, preemption,
2739  * or softirqs have been disabled also serve as RCU read-side critical
2740  * sections.  This includes hardware interrupt handlers, softirq handlers,
2741  * and NMI handlers.
2742  *
2743  * Note that all CPUs must agree that the grace period extended beyond
2744  * all pre-existing RCU read-side critical section.  On systems with more
2745  * than one CPU, this means that when "func()" is invoked, each CPU is
2746  * guaranteed to have executed a full memory barrier since the end of its
2747  * last RCU read-side critical section whose beginning preceded the call
2748  * to call_rcu().  It also means that each CPU executing an RCU read-side
2749  * critical section that continues beyond the start of "func()" must have
2750  * executed a memory barrier after the call_rcu() but before the beginning
2751  * of that RCU read-side critical section.  Note that these guarantees
2752  * include CPUs that are offline, idle, or executing in user mode, as
2753  * well as CPUs that are executing in the kernel.
2754  *
2755  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2756  * resulting RCU callback function "func()", then both CPU A and CPU B are
2757  * guaranteed to execute a full memory barrier during the time interval
2758  * between the call to call_rcu() and the invocation of "func()" -- even
2759  * if CPU A and CPU B are the same CPU (but again only if the system has
2760  * more than one CPU).
2761  *
2762  * Implementation of these memory-ordering guarantees is described here:
2763  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2764  */
2765 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2766 {
2767 	return __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
2768 }
2769 EXPORT_SYMBOL_GPL(call_rcu);
2770 
2771 /* Maximum number of jiffies to wait before draining a batch. */
2772 #define KFREE_DRAIN_JIFFIES (5 * HZ)
2773 #define KFREE_N_BATCHES 2
2774 #define FREE_N_CHANNELS 2
2775 
2776 /**
2777  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2778  * @list: List node. All blocks are linked between each other
2779  * @gp_snap: Snapshot of RCU state for objects placed to this bulk
2780  * @nr_records: Number of active pointers in the array
2781  * @records: Array of the kvfree_rcu() pointers
2782  */
2783 struct kvfree_rcu_bulk_data {
2784 	struct list_head list;
2785 	struct rcu_gp_oldstate gp_snap;
2786 	unsigned long nr_records;
2787 	void *records[];
2788 };
2789 
2790 /*
2791  * This macro defines how many entries the "records" array
2792  * will contain. It is based on the fact that the size of
2793  * kvfree_rcu_bulk_data structure becomes exactly one page.
2794  */
2795 #define KVFREE_BULK_MAX_ENTR \
2796 	((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
2797 
2798 /**
2799  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2800  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2801  * @head_free: List of kfree_rcu() objects waiting for a grace period
2802  * @head_free_gp_snap: Grace-period snapshot to check for attempted premature frees.
2803  * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2804  * @krcp: Pointer to @kfree_rcu_cpu structure
2805  */
2806 
2807 struct kfree_rcu_cpu_work {
2808 	struct rcu_work rcu_work;
2809 	struct rcu_head *head_free;
2810 	struct rcu_gp_oldstate head_free_gp_snap;
2811 	struct list_head bulk_head_free[FREE_N_CHANNELS];
2812 	struct kfree_rcu_cpu *krcp;
2813 };
2814 
2815 /**
2816  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2817  * @head: List of kfree_rcu() objects not yet waiting for a grace period
2818  * @head_gp_snap: Snapshot of RCU state for objects placed to "@head"
2819  * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2820  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2821  * @lock: Synchronize access to this structure
2822  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2823  * @initialized: The @rcu_work fields have been initialized
2824  * @head_count: Number of objects in rcu_head singular list
2825  * @bulk_count: Number of objects in bulk-list
2826  * @bkvcache:
2827  *	A simple cache list that contains objects for reuse purpose.
2828  *	In order to save some per-cpu space the list is singular.
2829  *	Even though it is lockless an access has to be protected by the
2830  *	per-cpu lock.
2831  * @page_cache_work: A work to refill the cache when it is empty
2832  * @backoff_page_cache_fill: Delay cache refills
2833  * @work_in_progress: Indicates that page_cache_work is running
2834  * @hrtimer: A hrtimer for scheduling a page_cache_work
2835  * @nr_bkv_objs: number of allocated objects at @bkvcache.
2836  *
2837  * This is a per-CPU structure.  The reason that it is not included in
2838  * the rcu_data structure is to permit this code to be extracted from
2839  * the RCU files.  Such extraction could allow further optimization of
2840  * the interactions with the slab allocators.
2841  */
2842 struct kfree_rcu_cpu {
2843 	// Objects queued on a linked list
2844 	// through their rcu_head structures.
2845 	struct rcu_head *head;
2846 	unsigned long head_gp_snap;
2847 	atomic_t head_count;
2848 
2849 	// Objects queued on a bulk-list.
2850 	struct list_head bulk_head[FREE_N_CHANNELS];
2851 	atomic_t bulk_count[FREE_N_CHANNELS];
2852 
2853 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2854 	raw_spinlock_t lock;
2855 	struct delayed_work monitor_work;
2856 	bool initialized;
2857 
2858 	struct delayed_work page_cache_work;
2859 	atomic_t backoff_page_cache_fill;
2860 	atomic_t work_in_progress;
2861 	struct hrtimer hrtimer;
2862 
2863 	struct llist_head bkvcache;
2864 	int nr_bkv_objs;
2865 };
2866 
2867 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2868 	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2869 };
2870 
2871 static __always_inline void
2872 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2873 {
2874 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2875 	int i;
2876 
2877 	for (i = 0; i < bhead->nr_records; i++)
2878 		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2879 #endif
2880 }
2881 
2882 static inline struct kfree_rcu_cpu *
2883 krc_this_cpu_lock(unsigned long *flags)
2884 {
2885 	struct kfree_rcu_cpu *krcp;
2886 
2887 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
2888 	krcp = this_cpu_ptr(&krc);
2889 	raw_spin_lock(&krcp->lock);
2890 
2891 	return krcp;
2892 }
2893 
2894 static inline void
2895 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2896 {
2897 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2898 }
2899 
2900 static inline struct kvfree_rcu_bulk_data *
2901 get_cached_bnode(struct kfree_rcu_cpu *krcp)
2902 {
2903 	if (!krcp->nr_bkv_objs)
2904 		return NULL;
2905 
2906 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2907 	return (struct kvfree_rcu_bulk_data *)
2908 		llist_del_first(&krcp->bkvcache);
2909 }
2910 
2911 static inline bool
2912 put_cached_bnode(struct kfree_rcu_cpu *krcp,
2913 	struct kvfree_rcu_bulk_data *bnode)
2914 {
2915 	// Check the limit.
2916 	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
2917 		return false;
2918 
2919 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
2920 	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
2921 	return true;
2922 }
2923 
2924 static int
2925 drain_page_cache(struct kfree_rcu_cpu *krcp)
2926 {
2927 	unsigned long flags;
2928 	struct llist_node *page_list, *pos, *n;
2929 	int freed = 0;
2930 
2931 	if (!rcu_min_cached_objs)
2932 		return 0;
2933 
2934 	raw_spin_lock_irqsave(&krcp->lock, flags);
2935 	page_list = llist_del_all(&krcp->bkvcache);
2936 	WRITE_ONCE(krcp->nr_bkv_objs, 0);
2937 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2938 
2939 	llist_for_each_safe(pos, n, page_list) {
2940 		free_page((unsigned long)pos);
2941 		freed++;
2942 	}
2943 
2944 	return freed;
2945 }
2946 
2947 static void
2948 kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
2949 	struct kvfree_rcu_bulk_data *bnode, int idx)
2950 {
2951 	unsigned long flags;
2952 	int i;
2953 
2954 	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
2955 		debug_rcu_bhead_unqueue(bnode);
2956 		rcu_lock_acquire(&rcu_callback_map);
2957 		if (idx == 0) { // kmalloc() / kfree().
2958 			trace_rcu_invoke_kfree_bulk_callback(
2959 				rcu_state.name, bnode->nr_records,
2960 				bnode->records);
2961 
2962 			kfree_bulk(bnode->nr_records, bnode->records);
2963 		} else { // vmalloc() / vfree().
2964 			for (i = 0; i < bnode->nr_records; i++) {
2965 				trace_rcu_invoke_kvfree_callback(
2966 					rcu_state.name, bnode->records[i], 0);
2967 
2968 				vfree(bnode->records[i]);
2969 			}
2970 		}
2971 		rcu_lock_release(&rcu_callback_map);
2972 	}
2973 
2974 	raw_spin_lock_irqsave(&krcp->lock, flags);
2975 	if (put_cached_bnode(krcp, bnode))
2976 		bnode = NULL;
2977 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
2978 
2979 	if (bnode)
2980 		free_page((unsigned long) bnode);
2981 
2982 	cond_resched_tasks_rcu_qs();
2983 }
2984 
2985 static void
2986 kvfree_rcu_list(struct rcu_head *head)
2987 {
2988 	struct rcu_head *next;
2989 
2990 	for (; head; head = next) {
2991 		void *ptr = (void *) head->func;
2992 		unsigned long offset = (void *) head - ptr;
2993 
2994 		next = head->next;
2995 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
2996 		rcu_lock_acquire(&rcu_callback_map);
2997 		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
2998 
2999 		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3000 			kvfree(ptr);
3001 
3002 		rcu_lock_release(&rcu_callback_map);
3003 		cond_resched_tasks_rcu_qs();
3004 	}
3005 }
3006 
3007 /*
3008  * This function is invoked in workqueue context after a grace period.
3009  * It frees all the objects queued on ->bulk_head_free or ->head_free.
3010  */
3011 static void kfree_rcu_work(struct work_struct *work)
3012 {
3013 	unsigned long flags;
3014 	struct kvfree_rcu_bulk_data *bnode, *n;
3015 	struct list_head bulk_head[FREE_N_CHANNELS];
3016 	struct rcu_head *head;
3017 	struct kfree_rcu_cpu *krcp;
3018 	struct kfree_rcu_cpu_work *krwp;
3019 	struct rcu_gp_oldstate head_gp_snap;
3020 	int i;
3021 
3022 	krwp = container_of(to_rcu_work(work),
3023 		struct kfree_rcu_cpu_work, rcu_work);
3024 	krcp = krwp->krcp;
3025 
3026 	raw_spin_lock_irqsave(&krcp->lock, flags);
3027 	// Channels 1 and 2.
3028 	for (i = 0; i < FREE_N_CHANNELS; i++)
3029 		list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
3030 
3031 	// Channel 3.
3032 	head = krwp->head_free;
3033 	krwp->head_free = NULL;
3034 	head_gp_snap = krwp->head_free_gp_snap;
3035 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3036 
3037 	// Handle the first two channels.
3038 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3039 		// Start from the tail page, so a GP is likely passed for it.
3040 		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
3041 			kvfree_rcu_bulk(krcp, bnode, i);
3042 	}
3043 
3044 	/*
3045 	 * This is used when the "bulk" path can not be used for the
3046 	 * double-argument of kvfree_rcu().  This happens when the
3047 	 * page-cache is empty, which means that objects are instead
3048 	 * queued on a linked list through their rcu_head structures.
3049 	 * This list is named "Channel 3".
3050 	 */
3051 	if (head && !WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&head_gp_snap)))
3052 		kvfree_rcu_list(head);
3053 }
3054 
3055 static bool
3056 need_offload_krc(struct kfree_rcu_cpu *krcp)
3057 {
3058 	int i;
3059 
3060 	for (i = 0; i < FREE_N_CHANNELS; i++)
3061 		if (!list_empty(&krcp->bulk_head[i]))
3062 			return true;
3063 
3064 	return !!READ_ONCE(krcp->head);
3065 }
3066 
3067 static bool
3068 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3069 {
3070 	int i;
3071 
3072 	for (i = 0; i < FREE_N_CHANNELS; i++)
3073 		if (!list_empty(&krwp->bulk_head_free[i]))
3074 			return true;
3075 
3076 	return !!krwp->head_free;
3077 }
3078 
3079 static int krc_count(struct kfree_rcu_cpu *krcp)
3080 {
3081 	int sum = atomic_read(&krcp->head_count);
3082 	int i;
3083 
3084 	for (i = 0; i < FREE_N_CHANNELS; i++)
3085 		sum += atomic_read(&krcp->bulk_count[i]);
3086 
3087 	return sum;
3088 }
3089 
3090 static void
3091 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3092 {
3093 	long delay, delay_left;
3094 
3095 	delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
3096 	if (delayed_work_pending(&krcp->monitor_work)) {
3097 		delay_left = krcp->monitor_work.timer.expires - jiffies;
3098 		if (delay < delay_left)
3099 			mod_delayed_work(system_wq, &krcp->monitor_work, delay);
3100 		return;
3101 	}
3102 	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
3103 }
3104 
3105 static void
3106 kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
3107 {
3108 	struct list_head bulk_ready[FREE_N_CHANNELS];
3109 	struct kvfree_rcu_bulk_data *bnode, *n;
3110 	struct rcu_head *head_ready = NULL;
3111 	unsigned long flags;
3112 	int i;
3113 
3114 	raw_spin_lock_irqsave(&krcp->lock, flags);
3115 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3116 		INIT_LIST_HEAD(&bulk_ready[i]);
3117 
3118 		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
3119 			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
3120 				break;
3121 
3122 			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
3123 			list_move(&bnode->list, &bulk_ready[i]);
3124 		}
3125 	}
3126 
3127 	if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) {
3128 		head_ready = krcp->head;
3129 		atomic_set(&krcp->head_count, 0);
3130 		WRITE_ONCE(krcp->head, NULL);
3131 	}
3132 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3133 
3134 	for (i = 0; i < FREE_N_CHANNELS; i++) {
3135 		list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
3136 			kvfree_rcu_bulk(krcp, bnode, i);
3137 	}
3138 
3139 	if (head_ready)
3140 		kvfree_rcu_list(head_ready);
3141 }
3142 
3143 /*
3144  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3145  */
3146 static void kfree_rcu_monitor(struct work_struct *work)
3147 {
3148 	struct kfree_rcu_cpu *krcp = container_of(work,
3149 		struct kfree_rcu_cpu, monitor_work.work);
3150 	unsigned long flags;
3151 	int i, j;
3152 
3153 	// Drain ready for reclaim.
3154 	kvfree_rcu_drain_ready(krcp);
3155 
3156 	raw_spin_lock_irqsave(&krcp->lock, flags);
3157 
3158 	// Attempt to start a new batch.
3159 	for (i = 0; i < KFREE_N_BATCHES; i++) {
3160 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3161 
3162 		// Try to detach bulk_head or head and attach it, only when
3163 		// all channels are free.  Any channel is not free means at krwp
3164 		// there is on-going rcu work to handle krwp's free business.
3165 		if (need_wait_for_krwp_work(krwp))
3166 			continue;
3167 
3168 		// kvfree_rcu_drain_ready() might handle this krcp, if so give up.
3169 		if (need_offload_krc(krcp)) {
3170 			// Channel 1 corresponds to the SLAB-pointer bulk path.
3171 			// Channel 2 corresponds to vmalloc-pointer bulk path.
3172 			for (j = 0; j < FREE_N_CHANNELS; j++) {
3173 				if (list_empty(&krwp->bulk_head_free[j])) {
3174 					atomic_set(&krcp->bulk_count[j], 0);
3175 					list_replace_init(&krcp->bulk_head[j],
3176 						&krwp->bulk_head_free[j]);
3177 				}
3178 			}
3179 
3180 			// Channel 3 corresponds to both SLAB and vmalloc
3181 			// objects queued on the linked list.
3182 			if (!krwp->head_free) {
3183 				krwp->head_free = krcp->head;
3184 				get_state_synchronize_rcu_full(&krwp->head_free_gp_snap);
3185 				atomic_set(&krcp->head_count, 0);
3186 				WRITE_ONCE(krcp->head, NULL);
3187 			}
3188 
3189 			// One work is per one batch, so there are three
3190 			// "free channels", the batch can handle. It can
3191 			// be that the work is in the pending state when
3192 			// channels have been detached following by each
3193 			// other.
3194 			queue_rcu_work(system_wq, &krwp->rcu_work);
3195 		}
3196 	}
3197 
3198 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
3199 
3200 	// If there is nothing to detach, it means that our job is
3201 	// successfully done here. In case of having at least one
3202 	// of the channels that is still busy we should rearm the
3203 	// work to repeat an attempt. Because previous batches are
3204 	// still in progress.
3205 	if (need_offload_krc(krcp))
3206 		schedule_delayed_monitor_work(krcp);
3207 }
3208 
3209 static enum hrtimer_restart
3210 schedule_page_work_fn(struct hrtimer *t)
3211 {
3212 	struct kfree_rcu_cpu *krcp =
3213 		container_of(t, struct kfree_rcu_cpu, hrtimer);
3214 
3215 	queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3216 	return HRTIMER_NORESTART;
3217 }
3218 
3219 static void fill_page_cache_func(struct work_struct *work)
3220 {
3221 	struct kvfree_rcu_bulk_data *bnode;
3222 	struct kfree_rcu_cpu *krcp =
3223 		container_of(work, struct kfree_rcu_cpu,
3224 			page_cache_work.work);
3225 	unsigned long flags;
3226 	int nr_pages;
3227 	bool pushed;
3228 	int i;
3229 
3230 	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3231 		1 : rcu_min_cached_objs;
3232 
3233 	for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) {
3234 		bnode = (struct kvfree_rcu_bulk_data *)
3235 			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3236 
3237 		if (!bnode)
3238 			break;
3239 
3240 		raw_spin_lock_irqsave(&krcp->lock, flags);
3241 		pushed = put_cached_bnode(krcp, bnode);
3242 		raw_spin_unlock_irqrestore(&krcp->lock, flags);
3243 
3244 		if (!pushed) {
3245 			free_page((unsigned long) bnode);
3246 			break;
3247 		}
3248 	}
3249 
3250 	atomic_set(&krcp->work_in_progress, 0);
3251 	atomic_set(&krcp->backoff_page_cache_fill, 0);
3252 }
3253 
3254 static void
3255 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3256 {
3257 	// If cache disabled, bail out.
3258 	if (!rcu_min_cached_objs)
3259 		return;
3260 
3261 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3262 			!atomic_xchg(&krcp->work_in_progress, 1)) {
3263 		if (atomic_read(&krcp->backoff_page_cache_fill)) {
3264 			queue_delayed_work(system_wq,
3265 				&krcp->page_cache_work,
3266 					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3267 		} else {
3268 			hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3269 			krcp->hrtimer.function = schedule_page_work_fn;
3270 			hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3271 		}
3272 	}
3273 }
3274 
3275 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3276 // state specified by flags.  If can_alloc is true, the caller must
3277 // be schedulable and not be holding any locks or mutexes that might be
3278 // acquired by the memory allocator or anything that it might invoke.
3279 // Returns true if ptr was successfully recorded, else the caller must
3280 // use a fallback.
3281 static inline bool
3282 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3283 	unsigned long *flags, void *ptr, bool can_alloc)
3284 {
3285 	struct kvfree_rcu_bulk_data *bnode;
3286 	int idx;
3287 
3288 	*krcp = krc_this_cpu_lock(flags);
3289 	if (unlikely(!(*krcp)->initialized))
3290 		return false;
3291 
3292 	idx = !!is_vmalloc_addr(ptr);
3293 	bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
3294 		struct kvfree_rcu_bulk_data, list);
3295 
3296 	/* Check if a new block is required. */
3297 	if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
3298 		bnode = get_cached_bnode(*krcp);
3299 		if (!bnode && can_alloc) {
3300 			krc_this_cpu_unlock(*krcp, *flags);
3301 
3302 			// __GFP_NORETRY - allows a light-weight direct reclaim
3303 			// what is OK from minimizing of fallback hitting point of
3304 			// view. Apart of that it forbids any OOM invoking what is
3305 			// also beneficial since we are about to release memory soon.
3306 			//
3307 			// __GFP_NOMEMALLOC - prevents from consuming of all the
3308 			// memory reserves. Please note we have a fallback path.
3309 			//
3310 			// __GFP_NOWARN - it is supposed that an allocation can
3311 			// be failed under low memory or high memory pressure
3312 			// scenarios.
3313 			bnode = (struct kvfree_rcu_bulk_data *)
3314 				__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3315 			raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
3316 		}
3317 
3318 		if (!bnode)
3319 			return false;
3320 
3321 		// Initialize the new block and attach it.
3322 		bnode->nr_records = 0;
3323 		list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
3324 	}
3325 
3326 	// Finally insert and update the GP for this page.
3327 	bnode->records[bnode->nr_records++] = ptr;
3328 	get_state_synchronize_rcu_full(&bnode->gp_snap);
3329 	atomic_inc(&(*krcp)->bulk_count[idx]);
3330 
3331 	return true;
3332 }
3333 
3334 /*
3335  * Queue a request for lazy invocation of the appropriate free routine
3336  * after a grace period.  Please note that three paths are maintained,
3337  * two for the common case using arrays of pointers and a third one that
3338  * is used only when the main paths cannot be used, for example, due to
3339  * memory pressure.
3340  *
3341  * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3342  * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
3343  * be free'd in workqueue context. This allows us to: batch requests together to
3344  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3345  */
3346 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
3347 {
3348 	unsigned long flags;
3349 	struct kfree_rcu_cpu *krcp;
3350 	bool success;
3351 
3352 	/*
3353 	 * Please note there is a limitation for the head-less
3354 	 * variant, that is why there is a clear rule for such
3355 	 * objects: it can be used from might_sleep() context
3356 	 * only. For other places please embed an rcu_head to
3357 	 * your data.
3358 	 */
3359 	if (!head)
3360 		might_sleep();
3361 
3362 	// Queue the object but don't yet schedule the batch.
3363 	if (debug_rcu_head_queue(ptr)) {
3364 		// Probable double kfree_rcu(), just leak.
3365 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3366 			  __func__, head);
3367 
3368 		// Mark as success and leave.
3369 		return;
3370 	}
3371 
3372 	kasan_record_aux_stack_noalloc(ptr);
3373 	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3374 	if (!success) {
3375 		run_page_cache_worker(krcp);
3376 
3377 		if (head == NULL)
3378 			// Inline if kvfree_rcu(one_arg) call.
3379 			goto unlock_return;
3380 
3381 		head->func = ptr;
3382 		head->next = krcp->head;
3383 		WRITE_ONCE(krcp->head, head);
3384 		atomic_inc(&krcp->head_count);
3385 
3386 		// Take a snapshot for this krcp.
3387 		krcp->head_gp_snap = get_state_synchronize_rcu();
3388 		success = true;
3389 	}
3390 
3391 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
3392 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3393 		schedule_delayed_monitor_work(krcp);
3394 
3395 unlock_return:
3396 	krc_this_cpu_unlock(krcp, flags);
3397 
3398 	/*
3399 	 * Inline kvfree() after synchronize_rcu(). We can do
3400 	 * it from might_sleep() context only, so the current
3401 	 * CPU can pass the QS state.
3402 	 */
3403 	if (!success) {
3404 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
3405 		synchronize_rcu();
3406 		kvfree(ptr);
3407 	}
3408 }
3409 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
3410 
3411 static unsigned long
3412 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3413 {
3414 	int cpu;
3415 	unsigned long count = 0;
3416 
3417 	/* Snapshot count of all CPUs */
3418 	for_each_possible_cpu(cpu) {
3419 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3420 
3421 		count += krc_count(krcp);
3422 		count += READ_ONCE(krcp->nr_bkv_objs);
3423 		atomic_set(&krcp->backoff_page_cache_fill, 1);
3424 	}
3425 
3426 	return count == 0 ? SHRINK_EMPTY : count;
3427 }
3428 
3429 static unsigned long
3430 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3431 {
3432 	int cpu, freed = 0;
3433 
3434 	for_each_possible_cpu(cpu) {
3435 		int count;
3436 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3437 
3438 		count = krc_count(krcp);
3439 		count += drain_page_cache(krcp);
3440 		kfree_rcu_monitor(&krcp->monitor_work.work);
3441 
3442 		sc->nr_to_scan -= count;
3443 		freed += count;
3444 
3445 		if (sc->nr_to_scan <= 0)
3446 			break;
3447 	}
3448 
3449 	return freed == 0 ? SHRINK_STOP : freed;
3450 }
3451 
3452 static struct shrinker kfree_rcu_shrinker = {
3453 	.count_objects = kfree_rcu_shrink_count,
3454 	.scan_objects = kfree_rcu_shrink_scan,
3455 	.batch = 0,
3456 	.seeks = DEFAULT_SEEKS,
3457 };
3458 
3459 void __init kfree_rcu_scheduler_running(void)
3460 {
3461 	int cpu;
3462 
3463 	for_each_possible_cpu(cpu) {
3464 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3465 
3466 		if (need_offload_krc(krcp))
3467 			schedule_delayed_monitor_work(krcp);
3468 	}
3469 }
3470 
3471 /*
3472  * During early boot, any blocking grace-period wait automatically
3473  * implies a grace period.
3474  *
3475  * Later on, this could in theory be the case for kernels built with
3476  * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3477  * is not a common case.  Furthermore, this optimization would cause
3478  * the rcu_gp_oldstate structure to expand by 50%, so this potential
3479  * grace-period optimization is ignored once the scheduler is running.
3480  */
3481 static int rcu_blocking_is_gp(void)
3482 {
3483 	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3484 		might_sleep();
3485 		return false;
3486 	}
3487 	return true;
3488 }
3489 
3490 /**
3491  * synchronize_rcu - wait until a grace period has elapsed.
3492  *
3493  * Control will return to the caller some time after a full grace
3494  * period has elapsed, in other words after all currently executing RCU
3495  * read-side critical sections have completed.  Note, however, that
3496  * upon return from synchronize_rcu(), the caller might well be executing
3497  * concurrently with new RCU read-side critical sections that began while
3498  * synchronize_rcu() was waiting.
3499  *
3500  * RCU read-side critical sections are delimited by rcu_read_lock()
3501  * and rcu_read_unlock(), and may be nested.  In addition, but only in
3502  * v5.0 and later, regions of code across which interrupts, preemption,
3503  * or softirqs have been disabled also serve as RCU read-side critical
3504  * sections.  This includes hardware interrupt handlers, softirq handlers,
3505  * and NMI handlers.
3506  *
3507  * Note that this guarantee implies further memory-ordering guarantees.
3508  * On systems with more than one CPU, when synchronize_rcu() returns,
3509  * each CPU is guaranteed to have executed a full memory barrier since
3510  * the end of its last RCU read-side critical section whose beginning
3511  * preceded the call to synchronize_rcu().  In addition, each CPU having
3512  * an RCU read-side critical section that extends beyond the return from
3513  * synchronize_rcu() is guaranteed to have executed a full memory barrier
3514  * after the beginning of synchronize_rcu() and before the beginning of
3515  * that RCU read-side critical section.  Note that these guarantees include
3516  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3517  * that are executing in the kernel.
3518  *
3519  * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3520  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3521  * to have executed a full memory barrier during the execution of
3522  * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3523  * again only if the system has more than one CPU).
3524  *
3525  * Implementation of these memory-ordering guarantees is described here:
3526  * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3527  */
3528 void synchronize_rcu(void)
3529 {
3530 	unsigned long flags;
3531 	struct rcu_node *rnp;
3532 
3533 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3534 			 lock_is_held(&rcu_lock_map) ||
3535 			 lock_is_held(&rcu_sched_lock_map),
3536 			 "Illegal synchronize_rcu() in RCU read-side critical section");
3537 	if (!rcu_blocking_is_gp()) {
3538 		if (rcu_gp_is_expedited())
3539 			synchronize_rcu_expedited();
3540 		else
3541 			wait_rcu_gp(call_rcu_hurry);
3542 		return;
3543 	}
3544 
3545 	// Context allows vacuous grace periods.
3546 	// Note well that this code runs with !PREEMPT && !SMP.
3547 	// In addition, all code that advances grace periods runs at
3548 	// process level.  Therefore, this normal GP overlaps with other
3549 	// normal GPs only by being fully nested within them, which allows
3550 	// reuse of ->gp_seq_polled_snap.
3551 	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3552 	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3553 
3554 	// Update the normal grace-period counters to record
3555 	// this grace period, but only those used by the boot CPU.
3556 	// The rcu_scheduler_starting() will take care of the rest of
3557 	// these counters.
3558 	local_irq_save(flags);
3559 	WARN_ON_ONCE(num_online_cpus() > 1);
3560 	rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3561 	for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3562 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3563 	local_irq_restore(flags);
3564 }
3565 EXPORT_SYMBOL_GPL(synchronize_rcu);
3566 
3567 /**
3568  * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3569  * @rgosp: Place to put state cookie
3570  *
3571  * Stores into @rgosp a value that will always be treated by functions
3572  * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3573  * has already completed.
3574  */
3575 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3576 {
3577 	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3578 	rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3579 }
3580 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
3581 
3582 /**
3583  * get_state_synchronize_rcu - Snapshot current RCU state
3584  *
3585  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3586  * or poll_state_synchronize_rcu() to determine whether or not a full
3587  * grace period has elapsed in the meantime.
3588  */
3589 unsigned long get_state_synchronize_rcu(void)
3590 {
3591 	/*
3592 	 * Any prior manipulation of RCU-protected data must happen
3593 	 * before the load from ->gp_seq.
3594 	 */
3595 	smp_mb();  /* ^^^ */
3596 	return rcu_seq_snap(&rcu_state.gp_seq_polled);
3597 }
3598 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3599 
3600 /**
3601  * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3602  * @rgosp: location to place combined normal/expedited grace-period state
3603  *
3604  * Places the normal and expedited grace-period states in @rgosp.  This
3605  * state value can be passed to a later call to cond_synchronize_rcu_full()
3606  * or poll_state_synchronize_rcu_full() to determine whether or not a
3607  * grace period (whether normal or expedited) has elapsed in the meantime.
3608  * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3609  * long, but is guaranteed to see all grace periods.  In contrast, the
3610  * combined state occupies less memory, but can sometimes fail to take
3611  * grace periods into account.
3612  *
3613  * This does not guarantee that the needed grace period will actually
3614  * start.
3615  */
3616 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3617 {
3618 	struct rcu_node *rnp = rcu_get_root();
3619 
3620 	/*
3621 	 * Any prior manipulation of RCU-protected data must happen
3622 	 * before the loads from ->gp_seq and ->expedited_sequence.
3623 	 */
3624 	smp_mb();  /* ^^^ */
3625 	rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3626 	rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3627 }
3628 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3629 
3630 /*
3631  * Helper function for start_poll_synchronize_rcu() and
3632  * start_poll_synchronize_rcu_full().
3633  */
3634 static void start_poll_synchronize_rcu_common(void)
3635 {
3636 	unsigned long flags;
3637 	bool needwake;
3638 	struct rcu_data *rdp;
3639 	struct rcu_node *rnp;
3640 
3641 	lockdep_assert_irqs_enabled();
3642 	local_irq_save(flags);
3643 	rdp = this_cpu_ptr(&rcu_data);
3644 	rnp = rdp->mynode;
3645 	raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3646 	// Note it is possible for a grace period to have elapsed between
3647 	// the above call to get_state_synchronize_rcu() and the below call
3648 	// to rcu_seq_snap.  This is OK, the worst that happens is that we
3649 	// get a grace period that no one needed.  These accesses are ordered
3650 	// by smp_mb(), and we are accessing them in the opposite order
3651 	// from which they are updated at grace-period start, as required.
3652 	needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3653 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3654 	if (needwake)
3655 		rcu_gp_kthread_wake();
3656 }
3657 
3658 /**
3659  * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3660  *
3661  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3662  * or poll_state_synchronize_rcu() to determine whether or not a full
3663  * grace period has elapsed in the meantime.  If the needed grace period
3664  * is not already slated to start, notifies RCU core of the need for that
3665  * grace period.
3666  *
3667  * Interrupts must be enabled for the case where it is necessary to awaken
3668  * the grace-period kthread.
3669  */
3670 unsigned long start_poll_synchronize_rcu(void)
3671 {
3672 	unsigned long gp_seq = get_state_synchronize_rcu();
3673 
3674 	start_poll_synchronize_rcu_common();
3675 	return gp_seq;
3676 }
3677 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3678 
3679 /**
3680  * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3681  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3682  *
3683  * Places the normal and expedited grace-period states in *@rgos.  This
3684  * state value can be passed to a later call to cond_synchronize_rcu_full()
3685  * or poll_state_synchronize_rcu_full() to determine whether or not a
3686  * grace period (whether normal or expedited) has elapsed in the meantime.
3687  * If the needed grace period is not already slated to start, notifies
3688  * RCU core of the need for that grace period.
3689  *
3690  * Interrupts must be enabled for the case where it is necessary to awaken
3691  * the grace-period kthread.
3692  */
3693 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3694 {
3695 	get_state_synchronize_rcu_full(rgosp);
3696 
3697 	start_poll_synchronize_rcu_common();
3698 }
3699 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3700 
3701 /**
3702  * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3703  * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3704  *
3705  * If a full RCU grace period has elapsed since the earlier call from
3706  * which @oldstate was obtained, return @true, otherwise return @false.
3707  * If @false is returned, it is the caller's responsibility to invoke this
3708  * function later on until it does return @true.  Alternatively, the caller
3709  * can explicitly wait for a grace period, for example, by passing @oldstate
3710  * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3711  * on the one hand or by directly invoking either synchronize_rcu() or
3712  * synchronize_rcu_expedited() on the other.
3713  *
3714  * Yes, this function does not take counter wrap into account.
3715  * But counter wrap is harmless.  If the counter wraps, we have waited for
3716  * more than a billion grace periods (and way more on a 64-bit system!).
3717  * Those needing to keep old state values for very long time periods
3718  * (many hours even on 32-bit systems) should check them occasionally and
3719  * either refresh them or set a flag indicating that the grace period has
3720  * completed.  Alternatively, they can use get_completed_synchronize_rcu()
3721  * to get a guaranteed-completed grace-period state.
3722  *
3723  * In addition, because oldstate compresses the grace-period state for
3724  * both normal and expedited grace periods into a single unsigned long,
3725  * it can miss a grace period when synchronize_rcu() runs concurrently
3726  * with synchronize_rcu_expedited().  If this is unacceptable, please
3727  * instead use the _full() variant of these polling APIs.
3728  *
3729  * This function provides the same memory-ordering guarantees that
3730  * would be provided by a synchronize_rcu() that was invoked at the call
3731  * to the function that provided @oldstate, and that returned at the end
3732  * of this function.
3733  */
3734 bool poll_state_synchronize_rcu(unsigned long oldstate)
3735 {
3736 	if (oldstate == RCU_GET_STATE_COMPLETED ||
3737 	    rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3738 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3739 		return true;
3740 	}
3741 	return false;
3742 }
3743 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3744 
3745 /**
3746  * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3747  * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3748  *
3749  * If a full RCU grace period has elapsed since the earlier call from
3750  * which *rgosp was obtained, return @true, otherwise return @false.
3751  * If @false is returned, it is the caller's responsibility to invoke this
3752  * function later on until it does return @true.  Alternatively, the caller
3753  * can explicitly wait for a grace period, for example, by passing @rgosp
3754  * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3755  *
3756  * Yes, this function does not take counter wrap into account.
3757  * But counter wrap is harmless.  If the counter wraps, we have waited
3758  * for more than a billion grace periods (and way more on a 64-bit
3759  * system!).  Those needing to keep rcu_gp_oldstate values for very
3760  * long time periods (many hours even on 32-bit systems) should check
3761  * them occasionally and either refresh them or set a flag indicating
3762  * that the grace period has completed.  Alternatively, they can use
3763  * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3764  * grace-period state.
3765  *
3766  * This function provides the same memory-ordering guarantees that would
3767  * be provided by a synchronize_rcu() that was invoked at the call to
3768  * the function that provided @rgosp, and that returned at the end of this
3769  * function.  And this guarantee requires that the root rcu_node structure's
3770  * ->gp_seq field be checked instead of that of the rcu_state structure.
3771  * The problem is that the just-ending grace-period's callbacks can be
3772  * invoked between the time that the root rcu_node structure's ->gp_seq
3773  * field is updated and the time that the rcu_state structure's ->gp_seq
3774  * field is updated.  Therefore, if a single synchronize_rcu() is to
3775  * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3776  * then the root rcu_node structure is the one that needs to be polled.
3777  */
3778 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3779 {
3780 	struct rcu_node *rnp = rcu_get_root();
3781 
3782 	smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3783 	if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3784 	    rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3785 	    rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3786 	    rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3787 		smp_mb(); /* Ensure GP ends before subsequent accesses. */
3788 		return true;
3789 	}
3790 	return false;
3791 }
3792 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
3793 
3794 /**
3795  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3796  * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3797  *
3798  * If a full RCU grace period has elapsed since the earlier call to
3799  * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3800  * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3801  *
3802  * Yes, this function does not take counter wrap into account.
3803  * But counter wrap is harmless.  If the counter wraps, we have waited for
3804  * more than 2 billion grace periods (and way more on a 64-bit system!),
3805  * so waiting for a couple of additional grace periods should be just fine.
3806  *
3807  * This function provides the same memory-ordering guarantees that
3808  * would be provided by a synchronize_rcu() that was invoked at the call
3809  * to the function that provided @oldstate and that returned at the end
3810  * of this function.
3811  */
3812 void cond_synchronize_rcu(unsigned long oldstate)
3813 {
3814 	if (!poll_state_synchronize_rcu(oldstate))
3815 		synchronize_rcu();
3816 }
3817 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3818 
3819 /**
3820  * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3821  * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3822  *
3823  * If a full RCU grace period has elapsed since the call to
3824  * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3825  * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3826  * obtained, just return.  Otherwise, invoke synchronize_rcu() to wait
3827  * for a full grace period.
3828  *
3829  * Yes, this function does not take counter wrap into account.
3830  * But counter wrap is harmless.  If the counter wraps, we have waited for
3831  * more than 2 billion grace periods (and way more on a 64-bit system!),
3832  * so waiting for a couple of additional grace periods should be just fine.
3833  *
3834  * This function provides the same memory-ordering guarantees that
3835  * would be provided by a synchronize_rcu() that was invoked at the call
3836  * to the function that provided @rgosp and that returned at the end of
3837  * this function.
3838  */
3839 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3840 {
3841 	if (!poll_state_synchronize_rcu_full(rgosp))
3842 		synchronize_rcu();
3843 }
3844 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3845 
3846 /*
3847  * Check to see if there is any immediate RCU-related work to be done by
3848  * the current CPU, returning 1 if so and zero otherwise.  The checks are
3849  * in order of increasing expense: checks that can be carried out against
3850  * CPU-local state are performed first.  However, we must check for CPU
3851  * stalls first, else we might not get a chance.
3852  */
3853 static int rcu_pending(int user)
3854 {
3855 	bool gp_in_progress;
3856 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3857 	struct rcu_node *rnp = rdp->mynode;
3858 
3859 	lockdep_assert_irqs_disabled();
3860 
3861 	/* Check for CPU stalls, if enabled. */
3862 	check_cpu_stall(rdp);
3863 
3864 	/* Does this CPU need a deferred NOCB wakeup? */
3865 	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3866 		return 1;
3867 
3868 	/* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3869 	if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3870 		return 0;
3871 
3872 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3873 	gp_in_progress = rcu_gp_in_progress();
3874 	if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3875 		return 1;
3876 
3877 	/* Does this CPU have callbacks ready to invoke? */
3878 	if (!rcu_rdp_is_offloaded(rdp) &&
3879 	    rcu_segcblist_ready_cbs(&rdp->cblist))
3880 		return 1;
3881 
3882 	/* Has RCU gone idle with this CPU needing another grace period? */
3883 	if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3884 	    !rcu_rdp_is_offloaded(rdp) &&
3885 	    !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3886 		return 1;
3887 
3888 	/* Have RCU grace period completed or started?  */
3889 	if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3890 	    unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3891 		return 1;
3892 
3893 	/* nothing to do */
3894 	return 0;
3895 }
3896 
3897 /*
3898  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3899  * the compiler is expected to optimize this away.
3900  */
3901 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3902 {
3903 	trace_rcu_barrier(rcu_state.name, s, cpu,
3904 			  atomic_read(&rcu_state.barrier_cpu_count), done);
3905 }
3906 
3907 /*
3908  * RCU callback function for rcu_barrier().  If we are last, wake
3909  * up the task executing rcu_barrier().
3910  *
3911  * Note that the value of rcu_state.barrier_sequence must be captured
3912  * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3913  * other CPUs might count the value down to zero before this CPU gets
3914  * around to invoking rcu_barrier_trace(), which might result in bogus
3915  * data from the next instance of rcu_barrier().
3916  */
3917 static void rcu_barrier_callback(struct rcu_head *rhp)
3918 {
3919 	unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3920 
3921 	if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3922 		rcu_barrier_trace(TPS("LastCB"), -1, s);
3923 		complete(&rcu_state.barrier_completion);
3924 	} else {
3925 		rcu_barrier_trace(TPS("CB"), -1, s);
3926 	}
3927 }
3928 
3929 /*
3930  * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3931  */
3932 static void rcu_barrier_entrain(struct rcu_data *rdp)
3933 {
3934 	unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3935 	unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3936 	bool wake_nocb = false;
3937 	bool was_alldone = false;
3938 
3939 	lockdep_assert_held(&rcu_state.barrier_lock);
3940 	if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3941 		return;
3942 	rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3943 	rdp->barrier_head.func = rcu_barrier_callback;
3944 	debug_rcu_head_queue(&rdp->barrier_head);
3945 	rcu_nocb_lock(rdp);
3946 	/*
3947 	 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
3948 	 * queue. This way we don't wait for bypass timer that can reach seconds
3949 	 * if it's fully lazy.
3950 	 */
3951 	was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3952 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3953 	wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3954 	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3955 		atomic_inc(&rcu_state.barrier_cpu_count);
3956 	} else {
3957 		debug_rcu_head_unqueue(&rdp->barrier_head);
3958 		rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3959 	}
3960 	rcu_nocb_unlock(rdp);
3961 	if (wake_nocb)
3962 		wake_nocb_gp(rdp, false);
3963 	smp_store_release(&rdp->barrier_seq_snap, gseq);
3964 }
3965 
3966 /*
3967  * Called with preemption disabled, and from cross-cpu IRQ context.
3968  */
3969 static void rcu_barrier_handler(void *cpu_in)
3970 {
3971 	uintptr_t cpu = (uintptr_t)cpu_in;
3972 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3973 
3974 	lockdep_assert_irqs_disabled();
3975 	WARN_ON_ONCE(cpu != rdp->cpu);
3976 	WARN_ON_ONCE(cpu != smp_processor_id());
3977 	raw_spin_lock(&rcu_state.barrier_lock);
3978 	rcu_barrier_entrain(rdp);
3979 	raw_spin_unlock(&rcu_state.barrier_lock);
3980 }
3981 
3982 /**
3983  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3984  *
3985  * Note that this primitive does not necessarily wait for an RCU grace period
3986  * to complete.  For example, if there are no RCU callbacks queued anywhere
3987  * in the system, then rcu_barrier() is within its rights to return
3988  * immediately, without waiting for anything, much less an RCU grace period.
3989  */
3990 void rcu_barrier(void)
3991 {
3992 	uintptr_t cpu;
3993 	unsigned long flags;
3994 	unsigned long gseq;
3995 	struct rcu_data *rdp;
3996 	unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3997 
3998 	rcu_barrier_trace(TPS("Begin"), -1, s);
3999 
4000 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
4001 	mutex_lock(&rcu_state.barrier_mutex);
4002 
4003 	/* Did someone else do our work for us? */
4004 	if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4005 		rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4006 		smp_mb(); /* caller's subsequent code after above check. */
4007 		mutex_unlock(&rcu_state.barrier_mutex);
4008 		return;
4009 	}
4010 
4011 	/* Mark the start of the barrier operation. */
4012 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4013 	rcu_seq_start(&rcu_state.barrier_sequence);
4014 	gseq = rcu_state.barrier_sequence;
4015 	rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4016 
4017 	/*
4018 	 * Initialize the count to two rather than to zero in order
4019 	 * to avoid a too-soon return to zero in case of an immediate
4020 	 * invocation of the just-enqueued callback (or preemption of
4021 	 * this task).  Exclude CPU-hotplug operations to ensure that no
4022 	 * offline non-offloaded CPU has callbacks queued.
4023 	 */
4024 	init_completion(&rcu_state.barrier_completion);
4025 	atomic_set(&rcu_state.barrier_cpu_count, 2);
4026 	raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4027 
4028 	/*
4029 	 * Force each CPU with callbacks to register a new callback.
4030 	 * When that callback is invoked, we will know that all of the
4031 	 * corresponding CPU's preceding callbacks have been invoked.
4032 	 */
4033 	for_each_possible_cpu(cpu) {
4034 		rdp = per_cpu_ptr(&rcu_data, cpu);
4035 retry:
4036 		if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4037 			continue;
4038 		raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4039 		if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4040 			WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4041 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4042 			rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4043 			continue;
4044 		}
4045 		if (!rcu_rdp_cpu_online(rdp)) {
4046 			rcu_barrier_entrain(rdp);
4047 			WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4048 			raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4049 			rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4050 			continue;
4051 		}
4052 		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4053 		if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4054 			schedule_timeout_uninterruptible(1);
4055 			goto retry;
4056 		}
4057 		WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4058 		rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4059 	}
4060 
4061 	/*
4062 	 * Now that we have an rcu_barrier_callback() callback on each
4063 	 * CPU, and thus each counted, remove the initial count.
4064 	 */
4065 	if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4066 		complete(&rcu_state.barrier_completion);
4067 
4068 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4069 	wait_for_completion(&rcu_state.barrier_completion);
4070 
4071 	/* Mark the end of the barrier operation. */
4072 	rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4073 	rcu_seq_end(&rcu_state.barrier_sequence);
4074 	gseq = rcu_state.barrier_sequence;
4075 	for_each_possible_cpu(cpu) {
4076 		rdp = per_cpu_ptr(&rcu_data, cpu);
4077 
4078 		WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4079 	}
4080 
4081 	/* Other rcu_barrier() invocations can now safely proceed. */
4082 	mutex_unlock(&rcu_state.barrier_mutex);
4083 }
4084 EXPORT_SYMBOL_GPL(rcu_barrier);
4085 
4086 /*
4087  * Compute the mask of online CPUs for the specified rcu_node structure.
4088  * This will not be stable unless the rcu_node structure's ->lock is
4089  * held, but the bit corresponding to the current CPU will be stable
4090  * in most contexts.
4091  */
4092 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
4093 {
4094 	return READ_ONCE(rnp->qsmaskinitnext);
4095 }
4096 
4097 /*
4098  * Is the CPU corresponding to the specified rcu_data structure online
4099  * from RCU's perspective?  This perspective is given by that structure's
4100  * ->qsmaskinitnext field rather than by the global cpu_online_mask.
4101  */
4102 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
4103 {
4104 	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
4105 }
4106 
4107 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
4108 
4109 /*
4110  * Is the current CPU online as far as RCU is concerned?
4111  *
4112  * Disable preemption to avoid false positives that could otherwise
4113  * happen due to the current CPU number being sampled, this task being
4114  * preempted, its old CPU being taken offline, resuming on some other CPU,
4115  * then determining that its old CPU is now offline.
4116  *
4117  * Disable checking if in an NMI handler because we cannot safely
4118  * report errors from NMI handlers anyway.  In addition, it is OK to use
4119  * RCU on an offline processor during initial boot, hence the check for
4120  * rcu_scheduler_fully_active.
4121  */
4122 bool rcu_lockdep_current_cpu_online(void)
4123 {
4124 	struct rcu_data *rdp;
4125 	bool ret = false;
4126 
4127 	if (in_nmi() || !rcu_scheduler_fully_active)
4128 		return true;
4129 	preempt_disable_notrace();
4130 	rdp = this_cpu_ptr(&rcu_data);
4131 	/*
4132 	 * Strictly, we care here about the case where the current CPU is
4133 	 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
4134 	 * not being up to date. So arch_spin_is_locked() might have a
4135 	 * false positive if it's held by some *other* CPU, but that's
4136 	 * OK because that just means a false *negative* on the warning.
4137 	 */
4138 	if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4139 		ret = true;
4140 	preempt_enable_notrace();
4141 	return ret;
4142 }
4143 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
4144 
4145 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
4146 
4147 // Has rcu_init() been invoked?  This is used (for example) to determine
4148 // whether spinlocks may be acquired safely.
4149 static bool rcu_init_invoked(void)
4150 {
4151 	return !!rcu_state.n_online_cpus;
4152 }
4153 
4154 /*
4155  * Near the end of the offline process.  Trace the fact that this CPU
4156  * is going offline.
4157  */
4158 int rcutree_dying_cpu(unsigned int cpu)
4159 {
4160 	bool blkd;
4161 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4162 	struct rcu_node *rnp = rdp->mynode;
4163 
4164 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
4165 		return 0;
4166 
4167 	blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4168 	trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4169 			       blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4170 	return 0;
4171 }
4172 
4173 /*
4174  * All CPUs for the specified rcu_node structure have gone offline,
4175  * and all tasks that were preempted within an RCU read-side critical
4176  * section while running on one of those CPUs have since exited their RCU
4177  * read-side critical section.  Some other CPU is reporting this fact with
4178  * the specified rcu_node structure's ->lock held and interrupts disabled.
4179  * This function therefore goes up the tree of rcu_node structures,
4180  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
4181  * the leaf rcu_node structure's ->qsmaskinit field has already been
4182  * updated.
4183  *
4184  * This function does check that the specified rcu_node structure has
4185  * all CPUs offline and no blocked tasks, so it is OK to invoke it
4186  * prematurely.  That said, invoking it after the fact will cost you
4187  * a needless lock acquisition.  So once it has done its work, don't
4188  * invoke it again.
4189  */
4190 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4191 {
4192 	long mask;
4193 	struct rcu_node *rnp = rnp_leaf;
4194 
4195 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4196 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4197 	    WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4198 	    WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4199 		return;
4200 	for (;;) {
4201 		mask = rnp->grpmask;
4202 		rnp = rnp->parent;
4203 		if (!rnp)
4204 			break;
4205 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4206 		rnp->qsmaskinit &= ~mask;
4207 		/* Between grace periods, so better already be zero! */
4208 		WARN_ON_ONCE(rnp->qsmask);
4209 		if (rnp->qsmaskinit) {
4210 			raw_spin_unlock_rcu_node(rnp);
4211 			/* irqs remain disabled. */
4212 			return;
4213 		}
4214 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4215 	}
4216 }
4217 
4218 /*
4219  * The CPU has been completely removed, and some other CPU is reporting
4220  * this fact from process context.  Do the remainder of the cleanup.
4221  * There can only be one CPU hotplug operation at a time, so no need for
4222  * explicit locking.
4223  */
4224 int rcutree_dead_cpu(unsigned int cpu)
4225 {
4226 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
4227 		return 0;
4228 
4229 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4230 	// Stop-machine done, so allow nohz_full to disable tick.
4231 	tick_dep_clear(TICK_DEP_BIT_RCU);
4232 	return 0;
4233 }
4234 
4235 /*
4236  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
4237  * first CPU in a given leaf rcu_node structure coming online.  The caller
4238  * must hold the corresponding leaf rcu_node ->lock with interrupts
4239  * disabled.
4240  */
4241 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4242 {
4243 	long mask;
4244 	long oldmask;
4245 	struct rcu_node *rnp = rnp_leaf;
4246 
4247 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4248 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4249 	for (;;) {
4250 		mask = rnp->grpmask;
4251 		rnp = rnp->parent;
4252 		if (rnp == NULL)
4253 			return;
4254 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4255 		oldmask = rnp->qsmaskinit;
4256 		rnp->qsmaskinit |= mask;
4257 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4258 		if (oldmask)
4259 			return;
4260 	}
4261 }
4262 
4263 /*
4264  * Do boot-time initialization of a CPU's per-CPU RCU data.
4265  */
4266 static void __init
4267 rcu_boot_init_percpu_data(int cpu)
4268 {
4269 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4270 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4271 
4272 	/* Set up local state, ensuring consistent view of global state. */
4273 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4274 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4275 	WARN_ON_ONCE(ct->dynticks_nesting != 1);
4276 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4277 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4278 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4279 	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4280 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4281 	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4282 	rdp->last_sched_clock = jiffies;
4283 	rdp->cpu = cpu;
4284 	rcu_boot_init_nocb_percpu_data(rdp);
4285 }
4286 
4287 /*
4288  * Invoked early in the CPU-online process, when pretty much all services
4289  * are available.  The incoming CPU is not present.
4290  *
4291  * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4292  * offline event can be happening at a given time.  Note also that we can
4293  * accept some slop in the rsp->gp_seq access due to the fact that this
4294  * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4295  * And any offloaded callbacks are being numbered elsewhere.
4296  */
4297 int rcutree_prepare_cpu(unsigned int cpu)
4298 {
4299 	unsigned long flags;
4300 	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4301 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4302 	struct rcu_node *rnp = rcu_get_root();
4303 
4304 	/* Set up local state, ensuring consistent view of global state. */
4305 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4306 	rdp->qlen_last_fqs_check = 0;
4307 	rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4308 	rdp->blimit = blimit;
4309 	ct->dynticks_nesting = 1;	/* CPU not up, no tearing. */
4310 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
4311 
4312 	/*
4313 	 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4314 	 * (re-)initialized.
4315 	 */
4316 	if (!rcu_segcblist_is_enabled(&rdp->cblist))
4317 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4318 
4319 	/*
4320 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4321 	 * propagation up the rcu_node tree will happen at the beginning
4322 	 * of the next grace period.
4323 	 */
4324 	rnp = rdp->mynode;
4325 	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
4326 	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4327 	rdp->gp_seq_needed = rdp->gp_seq;
4328 	rdp->cpu_no_qs.b.norm = true;
4329 	rdp->core_needs_qs = false;
4330 	rdp->rcu_iw_pending = false;
4331 	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4332 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4333 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4334 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4335 	rcu_spawn_one_boost_kthread(rnp);
4336 	rcu_spawn_cpu_nocb_kthread(cpu);
4337 	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4338 
4339 	return 0;
4340 }
4341 
4342 /*
4343  * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4344  */
4345 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4346 {
4347 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4348 
4349 	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4350 }
4351 
4352 /*
4353  * Has the specified (known valid) CPU ever been fully online?
4354  */
4355 bool rcu_cpu_beenfullyonline(int cpu)
4356 {
4357 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4358 
4359 	return smp_load_acquire(&rdp->beenonline);
4360 }
4361 
4362 /*
4363  * Near the end of the CPU-online process.  Pretty much all services
4364  * enabled, and the CPU is now very much alive.
4365  */
4366 int rcutree_online_cpu(unsigned int cpu)
4367 {
4368 	unsigned long flags;
4369 	struct rcu_data *rdp;
4370 	struct rcu_node *rnp;
4371 
4372 	rdp = per_cpu_ptr(&rcu_data, cpu);
4373 	rnp = rdp->mynode;
4374 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4375 	rnp->ffmask |= rdp->grpmask;
4376 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4377 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4378 		return 0; /* Too early in boot for scheduler work. */
4379 	sync_sched_exp_online_cleanup(cpu);
4380 	rcutree_affinity_setting(cpu, -1);
4381 
4382 	// Stop-machine done, so allow nohz_full to disable tick.
4383 	tick_dep_clear(TICK_DEP_BIT_RCU);
4384 	return 0;
4385 }
4386 
4387 /*
4388  * Near the beginning of the process.  The CPU is still very much alive
4389  * with pretty much all services enabled.
4390  */
4391 int rcutree_offline_cpu(unsigned int cpu)
4392 {
4393 	unsigned long flags;
4394 	struct rcu_data *rdp;
4395 	struct rcu_node *rnp;
4396 
4397 	rdp = per_cpu_ptr(&rcu_data, cpu);
4398 	rnp = rdp->mynode;
4399 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4400 	rnp->ffmask &= ~rdp->grpmask;
4401 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4402 
4403 	rcutree_affinity_setting(cpu, cpu);
4404 
4405 	// nohz_full CPUs need the tick for stop-machine to work quickly
4406 	tick_dep_set(TICK_DEP_BIT_RCU);
4407 	return 0;
4408 }
4409 
4410 /*
4411  * Mark the specified CPU as being online so that subsequent grace periods
4412  * (both expedited and normal) will wait on it.  Note that this means that
4413  * incoming CPUs are not allowed to use RCU read-side critical sections
4414  * until this function is called.  Failing to observe this restriction
4415  * will result in lockdep splats.
4416  *
4417  * Note that this function is special in that it is invoked directly
4418  * from the incoming CPU rather than from the cpuhp_step mechanism.
4419  * This is because this function must be invoked at a precise location.
4420  * This incoming CPU must not have enabled interrupts yet.
4421  */
4422 void rcu_cpu_starting(unsigned int cpu)
4423 {
4424 	unsigned long mask;
4425 	struct rcu_data *rdp;
4426 	struct rcu_node *rnp;
4427 	bool newcpu;
4428 
4429 	lockdep_assert_irqs_disabled();
4430 	rdp = per_cpu_ptr(&rcu_data, cpu);
4431 	if (rdp->cpu_started)
4432 		return;
4433 	rdp->cpu_started = true;
4434 
4435 	rnp = rdp->mynode;
4436 	mask = rdp->grpmask;
4437 	arch_spin_lock(&rcu_state.ofl_lock);
4438 	rcu_dynticks_eqs_online();
4439 	raw_spin_lock(&rcu_state.barrier_lock);
4440 	raw_spin_lock_rcu_node(rnp);
4441 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4442 	raw_spin_unlock(&rcu_state.barrier_lock);
4443 	newcpu = !(rnp->expmaskinitnext & mask);
4444 	rnp->expmaskinitnext |= mask;
4445 	/* Allow lockless access for expedited grace periods. */
4446 	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4447 	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4448 	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4449 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4450 	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4451 
4452 	/* An incoming CPU should never be blocking a grace period. */
4453 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4454 		/* rcu_report_qs_rnp() *really* wants some flags to restore */
4455 		unsigned long flags;
4456 
4457 		local_irq_save(flags);
4458 		rcu_disable_urgency_upon_qs(rdp);
4459 		/* Report QS -after- changing ->qsmaskinitnext! */
4460 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4461 	} else {
4462 		raw_spin_unlock_rcu_node(rnp);
4463 	}
4464 	arch_spin_unlock(&rcu_state.ofl_lock);
4465 	smp_store_release(&rdp->beenonline, true);
4466 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4467 }
4468 
4469 /*
4470  * The outgoing function has no further need of RCU, so remove it from
4471  * the rcu_node tree's ->qsmaskinitnext bit masks.
4472  *
4473  * Note that this function is special in that it is invoked directly
4474  * from the outgoing CPU rather than from the cpuhp_step mechanism.
4475  * This is because this function must be invoked at a precise location.
4476  */
4477 void rcu_report_dead(unsigned int cpu)
4478 {
4479 	unsigned long flags, seq_flags;
4480 	unsigned long mask;
4481 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4482 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4483 
4484 	// Do any dangling deferred wakeups.
4485 	do_nocb_deferred_wakeup(rdp);
4486 
4487 	rcu_preempt_deferred_qs(current);
4488 
4489 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4490 	mask = rdp->grpmask;
4491 	local_irq_save(seq_flags);
4492 	arch_spin_lock(&rcu_state.ofl_lock);
4493 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4494 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4495 	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4496 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4497 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
4498 		rcu_disable_urgency_upon_qs(rdp);
4499 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4500 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
4501 	}
4502 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4503 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4504 	arch_spin_unlock(&rcu_state.ofl_lock);
4505 	local_irq_restore(seq_flags);
4506 
4507 	rdp->cpu_started = false;
4508 }
4509 
4510 #ifdef CONFIG_HOTPLUG_CPU
4511 /*
4512  * The outgoing CPU has just passed through the dying-idle state, and we
4513  * are being invoked from the CPU that was IPIed to continue the offline
4514  * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4515  */
4516 void rcutree_migrate_callbacks(int cpu)
4517 {
4518 	unsigned long flags;
4519 	struct rcu_data *my_rdp;
4520 	struct rcu_node *my_rnp;
4521 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4522 	bool needwake;
4523 
4524 	if (rcu_rdp_is_offloaded(rdp) ||
4525 	    rcu_segcblist_empty(&rdp->cblist))
4526 		return;  /* No callbacks to migrate. */
4527 
4528 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4529 	WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4530 	rcu_barrier_entrain(rdp);
4531 	my_rdp = this_cpu_ptr(&rcu_data);
4532 	my_rnp = my_rdp->mynode;
4533 	rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4534 	WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4535 	raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4536 	/* Leverage recent GPs and set GP for new callbacks. */
4537 	needwake = rcu_advance_cbs(my_rnp, rdp) ||
4538 		   rcu_advance_cbs(my_rnp, my_rdp);
4539 	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4540 	raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4541 	needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4542 	rcu_segcblist_disable(&rdp->cblist);
4543 	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4544 	check_cb_ovld_locked(my_rdp, my_rnp);
4545 	if (rcu_rdp_is_offloaded(my_rdp)) {
4546 		raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4547 		__call_rcu_nocb_wake(my_rdp, true, flags);
4548 	} else {
4549 		rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4550 		raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4551 	}
4552 	if (needwake)
4553 		rcu_gp_kthread_wake();
4554 	lockdep_assert_irqs_enabled();
4555 	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4556 		  !rcu_segcblist_empty(&rdp->cblist),
4557 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4558 		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4559 		  rcu_segcblist_first_cb(&rdp->cblist));
4560 }
4561 #endif
4562 
4563 /*
4564  * On non-huge systems, use expedited RCU grace periods to make suspend
4565  * and hibernation run faster.
4566  */
4567 static int rcu_pm_notify(struct notifier_block *self,
4568 			 unsigned long action, void *hcpu)
4569 {
4570 	switch (action) {
4571 	case PM_HIBERNATION_PREPARE:
4572 	case PM_SUSPEND_PREPARE:
4573 		rcu_async_hurry();
4574 		rcu_expedite_gp();
4575 		break;
4576 	case PM_POST_HIBERNATION:
4577 	case PM_POST_SUSPEND:
4578 		rcu_unexpedite_gp();
4579 		rcu_async_relax();
4580 		break;
4581 	default:
4582 		break;
4583 	}
4584 	return NOTIFY_OK;
4585 }
4586 
4587 #ifdef CONFIG_RCU_EXP_KTHREAD
4588 struct kthread_worker *rcu_exp_gp_kworker;
4589 struct kthread_worker *rcu_exp_par_gp_kworker;
4590 
4591 static void __init rcu_start_exp_gp_kworkers(void)
4592 {
4593 	const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4594 	const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4595 	struct sched_param param = { .sched_priority = kthread_prio };
4596 
4597 	rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4598 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4599 		pr_err("Failed to create %s!\n", gp_kworker_name);
4600 		return;
4601 	}
4602 
4603 	rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4604 	if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4605 		pr_err("Failed to create %s!\n", par_gp_kworker_name);
4606 		kthread_destroy_worker(rcu_exp_gp_kworker);
4607 		return;
4608 	}
4609 
4610 	sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4611 	sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4612 				   &param);
4613 }
4614 
4615 static inline void rcu_alloc_par_gp_wq(void)
4616 {
4617 }
4618 #else /* !CONFIG_RCU_EXP_KTHREAD */
4619 struct workqueue_struct *rcu_par_gp_wq;
4620 
4621 static void __init rcu_start_exp_gp_kworkers(void)
4622 {
4623 }
4624 
4625 static inline void rcu_alloc_par_gp_wq(void)
4626 {
4627 	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4628 	WARN_ON(!rcu_par_gp_wq);
4629 }
4630 #endif /* CONFIG_RCU_EXP_KTHREAD */
4631 
4632 /*
4633  * Spawn the kthreads that handle RCU's grace periods.
4634  */
4635 static int __init rcu_spawn_gp_kthread(void)
4636 {
4637 	unsigned long flags;
4638 	struct rcu_node *rnp;
4639 	struct sched_param sp;
4640 	struct task_struct *t;
4641 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4642 
4643 	rcu_scheduler_fully_active = 1;
4644 	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4645 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4646 		return 0;
4647 	if (kthread_prio) {
4648 		sp.sched_priority = kthread_prio;
4649 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4650 	}
4651 	rnp = rcu_get_root();
4652 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
4653 	WRITE_ONCE(rcu_state.gp_activity, jiffies);
4654 	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4655 	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4656 	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4657 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4658 	wake_up_process(t);
4659 	/* This is a pre-SMP initcall, we expect a single CPU */
4660 	WARN_ON(num_online_cpus() > 1);
4661 	/*
4662 	 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
4663 	 * due to rcu_scheduler_fully_active.
4664 	 */
4665 	rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4666 	rcu_spawn_one_boost_kthread(rdp->mynode);
4667 	rcu_spawn_core_kthreads();
4668 	/* Create kthread worker for expedited GPs */
4669 	rcu_start_exp_gp_kworkers();
4670 	return 0;
4671 }
4672 early_initcall(rcu_spawn_gp_kthread);
4673 
4674 /*
4675  * This function is invoked towards the end of the scheduler's
4676  * initialization process.  Before this is called, the idle task might
4677  * contain synchronous grace-period primitives (during which time, this idle
4678  * task is booting the system, and such primitives are no-ops).  After this
4679  * function is called, any synchronous grace-period primitives are run as
4680  * expedited, with the requesting task driving the grace period forward.
4681  * A later core_initcall() rcu_set_runtime_mode() will switch to full
4682  * runtime RCU functionality.
4683  */
4684 void rcu_scheduler_starting(void)
4685 {
4686 	unsigned long flags;
4687 	struct rcu_node *rnp;
4688 
4689 	WARN_ON(num_online_cpus() != 1);
4690 	WARN_ON(nr_context_switches() > 0);
4691 	rcu_test_sync_prims();
4692 
4693 	// Fix up the ->gp_seq counters.
4694 	local_irq_save(flags);
4695 	rcu_for_each_node_breadth_first(rnp)
4696 		rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4697 	local_irq_restore(flags);
4698 
4699 	// Switch out of early boot mode.
4700 	rcu_scheduler_active = RCU_SCHEDULER_INIT;
4701 	rcu_test_sync_prims();
4702 }
4703 
4704 /*
4705  * Helper function for rcu_init() that initializes the rcu_state structure.
4706  */
4707 static void __init rcu_init_one(void)
4708 {
4709 	static const char * const buf[] = RCU_NODE_NAME_INIT;
4710 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
4711 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4712 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4713 
4714 	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
4715 	int cpustride = 1;
4716 	int i;
4717 	int j;
4718 	struct rcu_node *rnp;
4719 
4720 	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4721 
4722 	/* Silence gcc 4.8 false positive about array index out of range. */
4723 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4724 		panic("rcu_init_one: rcu_num_lvls out of range");
4725 
4726 	/* Initialize the level-tracking arrays. */
4727 
4728 	for (i = 1; i < rcu_num_lvls; i++)
4729 		rcu_state.level[i] =
4730 			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4731 	rcu_init_levelspread(levelspread, num_rcu_lvl);
4732 
4733 	/* Initialize the elements themselves, starting from the leaves. */
4734 
4735 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
4736 		cpustride *= levelspread[i];
4737 		rnp = rcu_state.level[i];
4738 		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4739 			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4740 			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4741 						   &rcu_node_class[i], buf[i]);
4742 			raw_spin_lock_init(&rnp->fqslock);
4743 			lockdep_set_class_and_name(&rnp->fqslock,
4744 						   &rcu_fqs_class[i], fqs[i]);
4745 			rnp->gp_seq = rcu_state.gp_seq;
4746 			rnp->gp_seq_needed = rcu_state.gp_seq;
4747 			rnp->completedqs = rcu_state.gp_seq;
4748 			rnp->qsmask = 0;
4749 			rnp->qsmaskinit = 0;
4750 			rnp->grplo = j * cpustride;
4751 			rnp->grphi = (j + 1) * cpustride - 1;
4752 			if (rnp->grphi >= nr_cpu_ids)
4753 				rnp->grphi = nr_cpu_ids - 1;
4754 			if (i == 0) {
4755 				rnp->grpnum = 0;
4756 				rnp->grpmask = 0;
4757 				rnp->parent = NULL;
4758 			} else {
4759 				rnp->grpnum = j % levelspread[i - 1];
4760 				rnp->grpmask = BIT(rnp->grpnum);
4761 				rnp->parent = rcu_state.level[i - 1] +
4762 					      j / levelspread[i - 1];
4763 			}
4764 			rnp->level = i;
4765 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4766 			rcu_init_one_nocb(rnp);
4767 			init_waitqueue_head(&rnp->exp_wq[0]);
4768 			init_waitqueue_head(&rnp->exp_wq[1]);
4769 			init_waitqueue_head(&rnp->exp_wq[2]);
4770 			init_waitqueue_head(&rnp->exp_wq[3]);
4771 			spin_lock_init(&rnp->exp_lock);
4772 			mutex_init(&rnp->boost_kthread_mutex);
4773 			raw_spin_lock_init(&rnp->exp_poll_lock);
4774 			rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4775 			INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4776 		}
4777 	}
4778 
4779 	init_swait_queue_head(&rcu_state.gp_wq);
4780 	init_swait_queue_head(&rcu_state.expedited_wq);
4781 	rnp = rcu_first_leaf_node();
4782 	for_each_possible_cpu(i) {
4783 		while (i > rnp->grphi)
4784 			rnp++;
4785 		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4786 		rcu_boot_init_percpu_data(i);
4787 	}
4788 }
4789 
4790 /*
4791  * Force priority from the kernel command-line into range.
4792  */
4793 static void __init sanitize_kthread_prio(void)
4794 {
4795 	int kthread_prio_in = kthread_prio;
4796 
4797 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4798 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4799 		kthread_prio = 2;
4800 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4801 		kthread_prio = 1;
4802 	else if (kthread_prio < 0)
4803 		kthread_prio = 0;
4804 	else if (kthread_prio > 99)
4805 		kthread_prio = 99;
4806 
4807 	if (kthread_prio != kthread_prio_in)
4808 		pr_alert("%s: Limited prio to %d from %d\n",
4809 			 __func__, kthread_prio, kthread_prio_in);
4810 }
4811 
4812 /*
4813  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4814  * replace the definitions in tree.h because those are needed to size
4815  * the ->node array in the rcu_state structure.
4816  */
4817 void rcu_init_geometry(void)
4818 {
4819 	ulong d;
4820 	int i;
4821 	static unsigned long old_nr_cpu_ids;
4822 	int rcu_capacity[RCU_NUM_LVLS];
4823 	static bool initialized;
4824 
4825 	if (initialized) {
4826 		/*
4827 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4828 		 * unless nr_cpus_ids == NR_CPUS, in which case who cares?
4829 		 */
4830 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4831 		return;
4832 	}
4833 
4834 	old_nr_cpu_ids = nr_cpu_ids;
4835 	initialized = true;
4836 
4837 	/*
4838 	 * Initialize any unspecified boot parameters.
4839 	 * The default values of jiffies_till_first_fqs and
4840 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4841 	 * value, which is a function of HZ, then adding one for each
4842 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4843 	 */
4844 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4845 	if (jiffies_till_first_fqs == ULONG_MAX)
4846 		jiffies_till_first_fqs = d;
4847 	if (jiffies_till_next_fqs == ULONG_MAX)
4848 		jiffies_till_next_fqs = d;
4849 	adjust_jiffies_till_sched_qs();
4850 
4851 	/* If the compile-time values are accurate, just leave. */
4852 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4853 	    nr_cpu_ids == NR_CPUS)
4854 		return;
4855 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4856 		rcu_fanout_leaf, nr_cpu_ids);
4857 
4858 	/*
4859 	 * The boot-time rcu_fanout_leaf parameter must be at least two
4860 	 * and cannot exceed the number of bits in the rcu_node masks.
4861 	 * Complain and fall back to the compile-time values if this
4862 	 * limit is exceeded.
4863 	 */
4864 	if (rcu_fanout_leaf < 2 ||
4865 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4866 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4867 		WARN_ON(1);
4868 		return;
4869 	}
4870 
4871 	/*
4872 	 * Compute number of nodes that can be handled an rcu_node tree
4873 	 * with the given number of levels.
4874 	 */
4875 	rcu_capacity[0] = rcu_fanout_leaf;
4876 	for (i = 1; i < RCU_NUM_LVLS; i++)
4877 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4878 
4879 	/*
4880 	 * The tree must be able to accommodate the configured number of CPUs.
4881 	 * If this limit is exceeded, fall back to the compile-time values.
4882 	 */
4883 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4884 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
4885 		WARN_ON(1);
4886 		return;
4887 	}
4888 
4889 	/* Calculate the number of levels in the tree. */
4890 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4891 	}
4892 	rcu_num_lvls = i + 1;
4893 
4894 	/* Calculate the number of rcu_nodes at each level of the tree. */
4895 	for (i = 0; i < rcu_num_lvls; i++) {
4896 		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4897 		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4898 	}
4899 
4900 	/* Calculate the total number of rcu_node structures. */
4901 	rcu_num_nodes = 0;
4902 	for (i = 0; i < rcu_num_lvls; i++)
4903 		rcu_num_nodes += num_rcu_lvl[i];
4904 }
4905 
4906 /*
4907  * Dump out the structure of the rcu_node combining tree associated
4908  * with the rcu_state structure.
4909  */
4910 static void __init rcu_dump_rcu_node_tree(void)
4911 {
4912 	int level = 0;
4913 	struct rcu_node *rnp;
4914 
4915 	pr_info("rcu_node tree layout dump\n");
4916 	pr_info(" ");
4917 	rcu_for_each_node_breadth_first(rnp) {
4918 		if (rnp->level != level) {
4919 			pr_cont("\n");
4920 			pr_info(" ");
4921 			level = rnp->level;
4922 		}
4923 		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4924 	}
4925 	pr_cont("\n");
4926 }
4927 
4928 struct workqueue_struct *rcu_gp_wq;
4929 
4930 static void __init kfree_rcu_batch_init(void)
4931 {
4932 	int cpu;
4933 	int i, j;
4934 
4935 	/* Clamp it to [0:100] seconds interval. */
4936 	if (rcu_delay_page_cache_fill_msec < 0 ||
4937 		rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4938 
4939 		rcu_delay_page_cache_fill_msec =
4940 			clamp(rcu_delay_page_cache_fill_msec, 0,
4941 				(int) (100 * MSEC_PER_SEC));
4942 
4943 		pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4944 			rcu_delay_page_cache_fill_msec);
4945 	}
4946 
4947 	for_each_possible_cpu(cpu) {
4948 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4949 
4950 		for (i = 0; i < KFREE_N_BATCHES; i++) {
4951 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4952 			krcp->krw_arr[i].krcp = krcp;
4953 
4954 			for (j = 0; j < FREE_N_CHANNELS; j++)
4955 				INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
4956 		}
4957 
4958 		for (i = 0; i < FREE_N_CHANNELS; i++)
4959 			INIT_LIST_HEAD(&krcp->bulk_head[i]);
4960 
4961 		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4962 		INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4963 		krcp->initialized = true;
4964 	}
4965 	if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
4966 		pr_err("Failed to register kfree_rcu() shrinker!\n");
4967 }
4968 
4969 void __init rcu_init(void)
4970 {
4971 	int cpu = smp_processor_id();
4972 
4973 	rcu_early_boot_tests();
4974 
4975 	kfree_rcu_batch_init();
4976 	rcu_bootup_announce();
4977 	sanitize_kthread_prio();
4978 	rcu_init_geometry();
4979 	rcu_init_one();
4980 	if (dump_tree)
4981 		rcu_dump_rcu_node_tree();
4982 	if (use_softirq)
4983 		open_softirq(RCU_SOFTIRQ, rcu_core_si);
4984 
4985 	/*
4986 	 * We don't need protection against CPU-hotplug here because
4987 	 * this is called early in boot, before either interrupts
4988 	 * or the scheduler are operational.
4989 	 */
4990 	pm_notifier(rcu_pm_notify, 0);
4991 	WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4992 	rcutree_prepare_cpu(cpu);
4993 	rcu_cpu_starting(cpu);
4994 	rcutree_online_cpu(cpu);
4995 
4996 	/* Create workqueue for Tree SRCU and for expedited GPs. */
4997 	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4998 	WARN_ON(!rcu_gp_wq);
4999 	rcu_alloc_par_gp_wq();
5000 
5001 	/* Fill in default value for rcutree.qovld boot parameter. */
5002 	/* -After- the rcu_node ->lock fields are initialized! */
5003 	if (qovld < 0)
5004 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
5005 	else
5006 		qovld_calc = qovld;
5007 
5008 	// Kick-start in case any polled grace periods started early.
5009 	(void)start_poll_synchronize_rcu_expedited();
5010 
5011 	rcu_test_sync_prims();
5012 }
5013 
5014 #include "tree_stall.h"
5015 #include "tree_exp.h"
5016 #include "tree_nocb.h"
5017 #include "tree_plugin.h"
5018