xref: /openbmc/linux/kernel/rcu/tree.c (revision e6c81cce)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, you can access it online at
16  * http://www.gnu.org/licenses/gpl-2.0.html.
17  *
18  * Copyright IBM Corporation, 2008
19  *
20  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21  *	    Manfred Spraul <manfred@colorfullife.com>
22  *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
23  *
24  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26  *
27  * For detailed explanation of Read-Copy Update mechanism see -
28  *	Documentation/RCU
29  */
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/smp.h>
35 #include <linux/rcupdate.h>
36 #include <linux/interrupt.h>
37 #include <linux/sched.h>
38 #include <linux/nmi.h>
39 #include <linux/atomic.h>
40 #include <linux/bitops.h>
41 #include <linux/export.h>
42 #include <linux/completion.h>
43 #include <linux/moduleparam.h>
44 #include <linux/module.h>
45 #include <linux/percpu.h>
46 #include <linux/notifier.h>
47 #include <linux/cpu.h>
48 #include <linux/mutex.h>
49 #include <linux/time.h>
50 #include <linux/kernel_stat.h>
51 #include <linux/wait.h>
52 #include <linux/kthread.h>
53 #include <linux/prefetch.h>
54 #include <linux/delay.h>
55 #include <linux/stop_machine.h>
56 #include <linux/random.h>
57 #include <linux/ftrace_event.h>
58 #include <linux/suspend.h>
59 
60 #include "tree.h"
61 #include "rcu.h"
62 
63 MODULE_ALIAS("rcutree");
64 #ifdef MODULE_PARAM_PREFIX
65 #undef MODULE_PARAM_PREFIX
66 #endif
67 #define MODULE_PARAM_PREFIX "rcutree."
68 
69 /* Data structures. */
70 
71 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
72 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
73 
74 /*
75  * In order to export the rcu_state name to the tracing tools, it
76  * needs to be added in the __tracepoint_string section.
77  * This requires defining a separate variable tp_<sname>_varname
78  * that points to the string being used, and this will allow
79  * the tracing userspace tools to be able to decipher the string
80  * address to the matching string.
81  */
82 #ifdef CONFIG_TRACING
83 # define DEFINE_RCU_TPS(sname) \
84 static char sname##_varname[] = #sname; \
85 static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
86 # define RCU_STATE_NAME(sname) sname##_varname
87 #else
88 # define DEFINE_RCU_TPS(sname)
89 # define RCU_STATE_NAME(sname) __stringify(sname)
90 #endif
91 
92 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
93 DEFINE_RCU_TPS(sname) \
94 DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
95 struct rcu_state sname##_state = { \
96 	.level = { &sname##_state.node[0] }, \
97 	.rda = &sname##_data, \
98 	.call = cr, \
99 	.fqs_state = RCU_GP_IDLE, \
100 	.gpnum = 0UL - 300UL, \
101 	.completed = 0UL - 300UL, \
102 	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
103 	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
104 	.orphan_donetail = &sname##_state.orphan_donelist, \
105 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
106 	.name = RCU_STATE_NAME(sname), \
107 	.abbr = sabbr, \
108 }
109 
110 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
111 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
112 
113 static struct rcu_state *rcu_state_p;
114 LIST_HEAD(rcu_struct_flavors);
115 
116 /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
117 static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
118 module_param(rcu_fanout_leaf, int, 0444);
119 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
120 static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
121 	NUM_RCU_LVL_0,
122 	NUM_RCU_LVL_1,
123 	NUM_RCU_LVL_2,
124 	NUM_RCU_LVL_3,
125 	NUM_RCU_LVL_4,
126 };
127 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
128 
129 /*
130  * The rcu_scheduler_active variable transitions from zero to one just
131  * before the first task is spawned.  So when this variable is zero, RCU
132  * can assume that there is but one task, allowing RCU to (for example)
133  * optimize synchronize_sched() to a simple barrier().  When this variable
134  * is one, RCU must actually do all the hard work required to detect real
135  * grace periods.  This variable is also used to suppress boot-time false
136  * positives from lockdep-RCU error checking.
137  */
138 int rcu_scheduler_active __read_mostly;
139 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
140 
141 /*
142  * The rcu_scheduler_fully_active variable transitions from zero to one
143  * during the early_initcall() processing, which is after the scheduler
144  * is capable of creating new tasks.  So RCU processing (for example,
145  * creating tasks for RCU priority boosting) must be delayed until after
146  * rcu_scheduler_fully_active transitions from zero to one.  We also
147  * currently delay invocation of any RCU callbacks until after this point.
148  *
149  * It might later prove better for people registering RCU callbacks during
150  * early boot to take responsibility for these callbacks, but one step at
151  * a time.
152  */
153 static int rcu_scheduler_fully_active __read_mostly;
154 
155 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
156 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
157 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
158 static void invoke_rcu_core(void);
159 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
160 
161 /* rcuc/rcub kthread realtime priority */
162 static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
163 module_param(kthread_prio, int, 0644);
164 
165 /* Delay in jiffies for grace-period initialization delays. */
166 static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT)
167 				? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY
168 				: 0;
169 module_param(gp_init_delay, int, 0644);
170 
171 /*
172  * Track the rcutorture test sequence number and the update version
173  * number within a given test.  The rcutorture_testseq is incremented
174  * on every rcutorture module load and unload, so has an odd value
175  * when a test is running.  The rcutorture_vernum is set to zero
176  * when rcutorture starts and is incremented on each rcutorture update.
177  * These variables enable correlating rcutorture output with the
178  * RCU tracing information.
179  */
180 unsigned long rcutorture_testseq;
181 unsigned long rcutorture_vernum;
182 
183 /*
184  * Compute the mask of online CPUs for the specified rcu_node structure.
185  * This will not be stable unless the rcu_node structure's ->lock is
186  * held, but the bit corresponding to the current CPU will be stable
187  * in most contexts.
188  */
189 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
190 {
191 	return ACCESS_ONCE(rnp->qsmaskinitnext);
192 }
193 
194 /*
195  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
196  * permit this function to be invoked without holding the root rcu_node
197  * structure's ->lock, but of course results can be subject to change.
198  */
199 static int rcu_gp_in_progress(struct rcu_state *rsp)
200 {
201 	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
202 }
203 
204 /*
205  * Note a quiescent state.  Because we do not need to know
206  * how many quiescent states passed, just if there was at least
207  * one since the start of the grace period, this just sets a flag.
208  * The caller must have disabled preemption.
209  */
210 void rcu_sched_qs(void)
211 {
212 	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
213 		trace_rcu_grace_period(TPS("rcu_sched"),
214 				       __this_cpu_read(rcu_sched_data.gpnum),
215 				       TPS("cpuqs"));
216 		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
217 	}
218 }
219 
220 void rcu_bh_qs(void)
221 {
222 	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
223 		trace_rcu_grace_period(TPS("rcu_bh"),
224 				       __this_cpu_read(rcu_bh_data.gpnum),
225 				       TPS("cpuqs"));
226 		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
227 	}
228 }
229 
230 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
231 
232 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
233 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
234 	.dynticks = ATOMIC_INIT(1),
235 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
236 	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
237 	.dynticks_idle = ATOMIC_INIT(1),
238 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
239 };
240 
241 DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
242 EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
243 
244 /*
245  * Let the RCU core know that this CPU has gone through the scheduler,
246  * which is a quiescent state.  This is called when the need for a
247  * quiescent state is urgent, so we burn an atomic operation and full
248  * memory barriers to let the RCU core know about it, regardless of what
249  * this CPU might (or might not) do in the near future.
250  *
251  * We inform the RCU core by emulating a zero-duration dyntick-idle
252  * period, which we in turn do by incrementing the ->dynticks counter
253  * by two.
254  */
255 static void rcu_momentary_dyntick_idle(void)
256 {
257 	unsigned long flags;
258 	struct rcu_data *rdp;
259 	struct rcu_dynticks *rdtp;
260 	int resched_mask;
261 	struct rcu_state *rsp;
262 
263 	local_irq_save(flags);
264 
265 	/*
266 	 * Yes, we can lose flag-setting operations.  This is OK, because
267 	 * the flag will be set again after some delay.
268 	 */
269 	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
270 	raw_cpu_write(rcu_sched_qs_mask, 0);
271 
272 	/* Find the flavor that needs a quiescent state. */
273 	for_each_rcu_flavor(rsp) {
274 		rdp = raw_cpu_ptr(rsp->rda);
275 		if (!(resched_mask & rsp->flavor_mask))
276 			continue;
277 		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
278 		if (ACCESS_ONCE(rdp->mynode->completed) !=
279 		    ACCESS_ONCE(rdp->cond_resched_completed))
280 			continue;
281 
282 		/*
283 		 * Pretend to be momentarily idle for the quiescent state.
284 		 * This allows the grace-period kthread to record the
285 		 * quiescent state, with no need for this CPU to do anything
286 		 * further.
287 		 */
288 		rdtp = this_cpu_ptr(&rcu_dynticks);
289 		smp_mb__before_atomic(); /* Earlier stuff before QS. */
290 		atomic_add(2, &rdtp->dynticks);  /* QS. */
291 		smp_mb__after_atomic(); /* Later stuff after QS. */
292 		break;
293 	}
294 	local_irq_restore(flags);
295 }
296 
297 /*
298  * Note a context switch.  This is a quiescent state for RCU-sched,
299  * and requires special handling for preemptible RCU.
300  * The caller must have disabled preemption.
301  */
302 void rcu_note_context_switch(void)
303 {
304 	trace_rcu_utilization(TPS("Start context switch"));
305 	rcu_sched_qs();
306 	rcu_preempt_note_context_switch();
307 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
308 		rcu_momentary_dyntick_idle();
309 	trace_rcu_utilization(TPS("End context switch"));
310 }
311 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
312 
313 /*
314  * Register a quiescent state for all RCU flavors.  If there is an
315  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
316  * dyntick-idle quiescent state visible to other CPUs (but only for those
317  * RCU flavors in desperate need of a quiescent state, which will normally
318  * be none of them).  Either way, do a lightweight quiescent state for
319  * all RCU flavors.
320  */
321 void rcu_all_qs(void)
322 {
323 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
324 		rcu_momentary_dyntick_idle();
325 	this_cpu_inc(rcu_qs_ctr);
326 }
327 EXPORT_SYMBOL_GPL(rcu_all_qs);
328 
329 static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
330 static long qhimark = 10000;	/* If this many pending, ignore blimit. */
331 static long qlowmark = 100;	/* Once only this many pending, use blimit. */
332 
333 module_param(blimit, long, 0444);
334 module_param(qhimark, long, 0444);
335 module_param(qlowmark, long, 0444);
336 
337 static ulong jiffies_till_first_fqs = ULONG_MAX;
338 static ulong jiffies_till_next_fqs = ULONG_MAX;
339 
340 module_param(jiffies_till_first_fqs, ulong, 0644);
341 module_param(jiffies_till_next_fqs, ulong, 0644);
342 
343 /*
344  * How long the grace period must be before we start recruiting
345  * quiescent-state help from rcu_note_context_switch().
346  */
347 static ulong jiffies_till_sched_qs = HZ / 20;
348 module_param(jiffies_till_sched_qs, ulong, 0644);
349 
350 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
351 				  struct rcu_data *rdp);
352 static void force_qs_rnp(struct rcu_state *rsp,
353 			 int (*f)(struct rcu_data *rsp, bool *isidle,
354 				  unsigned long *maxj),
355 			 bool *isidle, unsigned long *maxj);
356 static void force_quiescent_state(struct rcu_state *rsp);
357 static int rcu_pending(void);
358 
359 /*
360  * Return the number of RCU batches started thus far for debug & stats.
361  */
362 unsigned long rcu_batches_started(void)
363 {
364 	return rcu_state_p->gpnum;
365 }
366 EXPORT_SYMBOL_GPL(rcu_batches_started);
367 
368 /*
369  * Return the number of RCU-sched batches started thus far for debug & stats.
370  */
371 unsigned long rcu_batches_started_sched(void)
372 {
373 	return rcu_sched_state.gpnum;
374 }
375 EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
376 
377 /*
378  * Return the number of RCU BH batches started thus far for debug & stats.
379  */
380 unsigned long rcu_batches_started_bh(void)
381 {
382 	return rcu_bh_state.gpnum;
383 }
384 EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
385 
386 /*
387  * Return the number of RCU batches completed thus far for debug & stats.
388  */
389 unsigned long rcu_batches_completed(void)
390 {
391 	return rcu_state_p->completed;
392 }
393 EXPORT_SYMBOL_GPL(rcu_batches_completed);
394 
395 /*
396  * Return the number of RCU-sched batches completed thus far for debug & stats.
397  */
398 unsigned long rcu_batches_completed_sched(void)
399 {
400 	return rcu_sched_state.completed;
401 }
402 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
403 
404 /*
405  * Return the number of RCU BH batches completed thus far for debug & stats.
406  */
407 unsigned long rcu_batches_completed_bh(void)
408 {
409 	return rcu_bh_state.completed;
410 }
411 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
412 
413 /*
414  * Force a quiescent state.
415  */
416 void rcu_force_quiescent_state(void)
417 {
418 	force_quiescent_state(rcu_state_p);
419 }
420 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
421 
422 /*
423  * Force a quiescent state for RCU BH.
424  */
425 void rcu_bh_force_quiescent_state(void)
426 {
427 	force_quiescent_state(&rcu_bh_state);
428 }
429 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
430 
431 /*
432  * Force a quiescent state for RCU-sched.
433  */
434 void rcu_sched_force_quiescent_state(void)
435 {
436 	force_quiescent_state(&rcu_sched_state);
437 }
438 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
439 
440 /*
441  * Show the state of the grace-period kthreads.
442  */
443 void show_rcu_gp_kthreads(void)
444 {
445 	struct rcu_state *rsp;
446 
447 	for_each_rcu_flavor(rsp) {
448 		pr_info("%s: wait state: %d ->state: %#lx\n",
449 			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
450 		/* sched_show_task(rsp->gp_kthread); */
451 	}
452 }
453 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
454 
455 /*
456  * Record the number of times rcutorture tests have been initiated and
457  * terminated.  This information allows the debugfs tracing stats to be
458  * correlated to the rcutorture messages, even when the rcutorture module
459  * is being repeatedly loaded and unloaded.  In other words, we cannot
460  * store this state in rcutorture itself.
461  */
462 void rcutorture_record_test_transition(void)
463 {
464 	rcutorture_testseq++;
465 	rcutorture_vernum = 0;
466 }
467 EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
468 
469 /*
470  * Send along grace-period-related data for rcutorture diagnostics.
471  */
472 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
473 			    unsigned long *gpnum, unsigned long *completed)
474 {
475 	struct rcu_state *rsp = NULL;
476 
477 	switch (test_type) {
478 	case RCU_FLAVOR:
479 		rsp = rcu_state_p;
480 		break;
481 	case RCU_BH_FLAVOR:
482 		rsp = &rcu_bh_state;
483 		break;
484 	case RCU_SCHED_FLAVOR:
485 		rsp = &rcu_sched_state;
486 		break;
487 	default:
488 		break;
489 	}
490 	if (rsp != NULL) {
491 		*flags = ACCESS_ONCE(rsp->gp_flags);
492 		*gpnum = ACCESS_ONCE(rsp->gpnum);
493 		*completed = ACCESS_ONCE(rsp->completed);
494 		return;
495 	}
496 	*flags = 0;
497 	*gpnum = 0;
498 	*completed = 0;
499 }
500 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
501 
502 /*
503  * Record the number of writer passes through the current rcutorture test.
504  * This is also used to correlate debugfs tracing stats with the rcutorture
505  * messages.
506  */
507 void rcutorture_record_progress(unsigned long vernum)
508 {
509 	rcutorture_vernum++;
510 }
511 EXPORT_SYMBOL_GPL(rcutorture_record_progress);
512 
513 /*
514  * Does the CPU have callbacks ready to be invoked?
515  */
516 static int
517 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
518 {
519 	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
520 	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
521 }
522 
523 /*
524  * Return the root node of the specified rcu_state structure.
525  */
526 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
527 {
528 	return &rsp->node[0];
529 }
530 
531 /*
532  * Is there any need for future grace periods?
533  * Interrupts must be disabled.  If the caller does not hold the root
534  * rnp_node structure's ->lock, the results are advisory only.
535  */
536 static int rcu_future_needs_gp(struct rcu_state *rsp)
537 {
538 	struct rcu_node *rnp = rcu_get_root(rsp);
539 	int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
540 	int *fp = &rnp->need_future_gp[idx];
541 
542 	return ACCESS_ONCE(*fp);
543 }
544 
545 /*
546  * Does the current CPU require a not-yet-started grace period?
547  * The caller must have disabled interrupts to prevent races with
548  * normal callback registry.
549  */
550 static int
551 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
552 {
553 	int i;
554 
555 	if (rcu_gp_in_progress(rsp))
556 		return 0;  /* No, a grace period is already in progress. */
557 	if (rcu_future_needs_gp(rsp))
558 		return 1;  /* Yes, a no-CBs CPU needs one. */
559 	if (!rdp->nxttail[RCU_NEXT_TAIL])
560 		return 0;  /* No, this is a no-CBs (or offline) CPU. */
561 	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
562 		return 1;  /* Yes, this CPU has newly registered callbacks. */
563 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
564 		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
565 		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
566 				 rdp->nxtcompleted[i]))
567 			return 1;  /* Yes, CBs for future grace period. */
568 	return 0; /* No grace period needed. */
569 }
570 
571 /*
572  * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
573  *
574  * If the new value of the ->dynticks_nesting counter now is zero,
575  * we really have entered idle, and must do the appropriate accounting.
576  * The caller must have disabled interrupts.
577  */
578 static void rcu_eqs_enter_common(long long oldval, bool user)
579 {
580 	struct rcu_state *rsp;
581 	struct rcu_data *rdp;
582 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
583 
584 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
585 	if (!user && !is_idle_task(current)) {
586 		struct task_struct *idle __maybe_unused =
587 			idle_task(smp_processor_id());
588 
589 		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
590 		ftrace_dump(DUMP_ORIG);
591 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
592 			  current->pid, current->comm,
593 			  idle->pid, idle->comm); /* must be idle task! */
594 	}
595 	for_each_rcu_flavor(rsp) {
596 		rdp = this_cpu_ptr(rsp->rda);
597 		do_nocb_deferred_wakeup(rdp);
598 	}
599 	rcu_prepare_for_idle();
600 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
601 	smp_mb__before_atomic();  /* See above. */
602 	atomic_inc(&rdtp->dynticks);
603 	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
604 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
605 	rcu_dynticks_task_enter();
606 
607 	/*
608 	 * It is illegal to enter an extended quiescent state while
609 	 * in an RCU read-side critical section.
610 	 */
611 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
612 			   "Illegal idle entry in RCU read-side critical section.");
613 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
614 			   "Illegal idle entry in RCU-bh read-side critical section.");
615 	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
616 			   "Illegal idle entry in RCU-sched read-side critical section.");
617 }
618 
619 /*
620  * Enter an RCU extended quiescent state, which can be either the
621  * idle loop or adaptive-tickless usermode execution.
622  */
623 static void rcu_eqs_enter(bool user)
624 {
625 	long long oldval;
626 	struct rcu_dynticks *rdtp;
627 
628 	rdtp = this_cpu_ptr(&rcu_dynticks);
629 	oldval = rdtp->dynticks_nesting;
630 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
631 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
632 		rdtp->dynticks_nesting = 0;
633 		rcu_eqs_enter_common(oldval, user);
634 	} else {
635 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
636 	}
637 }
638 
639 /**
640  * rcu_idle_enter - inform RCU that current CPU is entering idle
641  *
642  * Enter idle mode, in other words, -leave- the mode in which RCU
643  * read-side critical sections can occur.  (Though RCU read-side
644  * critical sections can occur in irq handlers in idle, a possibility
645  * handled by irq_enter() and irq_exit().)
646  *
647  * We crowbar the ->dynticks_nesting field to zero to allow for
648  * the possibility of usermode upcalls having messed up our count
649  * of interrupt nesting level during the prior busy period.
650  */
651 void rcu_idle_enter(void)
652 {
653 	unsigned long flags;
654 
655 	local_irq_save(flags);
656 	rcu_eqs_enter(false);
657 	rcu_sysidle_enter(0);
658 	local_irq_restore(flags);
659 }
660 EXPORT_SYMBOL_GPL(rcu_idle_enter);
661 
662 #ifdef CONFIG_RCU_USER_QS
663 /**
664  * rcu_user_enter - inform RCU that we are resuming userspace.
665  *
666  * Enter RCU idle mode right before resuming userspace.  No use of RCU
667  * is permitted between this call and rcu_user_exit(). This way the
668  * CPU doesn't need to maintain the tick for RCU maintenance purposes
669  * when the CPU runs in userspace.
670  */
671 void rcu_user_enter(void)
672 {
673 	rcu_eqs_enter(1);
674 }
675 #endif /* CONFIG_RCU_USER_QS */
676 
677 /**
678  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
679  *
680  * Exit from an interrupt handler, which might possibly result in entering
681  * idle mode, in other words, leaving the mode in which read-side critical
682  * sections can occur.
683  *
684  * This code assumes that the idle loop never does anything that might
685  * result in unbalanced calls to irq_enter() and irq_exit().  If your
686  * architecture violates this assumption, RCU will give you what you
687  * deserve, good and hard.  But very infrequently and irreproducibly.
688  *
689  * Use things like work queues to work around this limitation.
690  *
691  * You have been warned.
692  */
693 void rcu_irq_exit(void)
694 {
695 	unsigned long flags;
696 	long long oldval;
697 	struct rcu_dynticks *rdtp;
698 
699 	local_irq_save(flags);
700 	rdtp = this_cpu_ptr(&rcu_dynticks);
701 	oldval = rdtp->dynticks_nesting;
702 	rdtp->dynticks_nesting--;
703 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
704 	if (rdtp->dynticks_nesting)
705 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
706 	else
707 		rcu_eqs_enter_common(oldval, true);
708 	rcu_sysidle_enter(1);
709 	local_irq_restore(flags);
710 }
711 
712 /*
713  * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
714  *
715  * If the new value of the ->dynticks_nesting counter was previously zero,
716  * we really have exited idle, and must do the appropriate accounting.
717  * The caller must have disabled interrupts.
718  */
719 static void rcu_eqs_exit_common(long long oldval, int user)
720 {
721 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
722 
723 	rcu_dynticks_task_exit();
724 	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
725 	atomic_inc(&rdtp->dynticks);
726 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
727 	smp_mb__after_atomic();  /* See above. */
728 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
729 	rcu_cleanup_after_idle();
730 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
731 	if (!user && !is_idle_task(current)) {
732 		struct task_struct *idle __maybe_unused =
733 			idle_task(smp_processor_id());
734 
735 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
736 				  oldval, rdtp->dynticks_nesting);
737 		ftrace_dump(DUMP_ORIG);
738 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
739 			  current->pid, current->comm,
740 			  idle->pid, idle->comm); /* must be idle task! */
741 	}
742 }
743 
744 /*
745  * Exit an RCU extended quiescent state, which can be either the
746  * idle loop or adaptive-tickless usermode execution.
747  */
748 static void rcu_eqs_exit(bool user)
749 {
750 	struct rcu_dynticks *rdtp;
751 	long long oldval;
752 
753 	rdtp = this_cpu_ptr(&rcu_dynticks);
754 	oldval = rdtp->dynticks_nesting;
755 	WARN_ON_ONCE(oldval < 0);
756 	if (oldval & DYNTICK_TASK_NEST_MASK) {
757 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
758 	} else {
759 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
760 		rcu_eqs_exit_common(oldval, user);
761 	}
762 }
763 
764 /**
765  * rcu_idle_exit - inform RCU that current CPU is leaving idle
766  *
767  * Exit idle mode, in other words, -enter- the mode in which RCU
768  * read-side critical sections can occur.
769  *
770  * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
771  * allow for the possibility of usermode upcalls messing up our count
772  * of interrupt nesting level during the busy period that is just
773  * now starting.
774  */
775 void rcu_idle_exit(void)
776 {
777 	unsigned long flags;
778 
779 	local_irq_save(flags);
780 	rcu_eqs_exit(false);
781 	rcu_sysidle_exit(0);
782 	local_irq_restore(flags);
783 }
784 EXPORT_SYMBOL_GPL(rcu_idle_exit);
785 
786 #ifdef CONFIG_RCU_USER_QS
787 /**
788  * rcu_user_exit - inform RCU that we are exiting userspace.
789  *
790  * Exit RCU idle mode while entering the kernel because it can
791  * run a RCU read side critical section anytime.
792  */
793 void rcu_user_exit(void)
794 {
795 	rcu_eqs_exit(1);
796 }
797 #endif /* CONFIG_RCU_USER_QS */
798 
799 /**
800  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
801  *
802  * Enter an interrupt handler, which might possibly result in exiting
803  * idle mode, in other words, entering the mode in which read-side critical
804  * sections can occur.
805  *
806  * Note that the Linux kernel is fully capable of entering an interrupt
807  * handler that it never exits, for example when doing upcalls to
808  * user mode!  This code assumes that the idle loop never does upcalls to
809  * user mode.  If your architecture does do upcalls from the idle loop (or
810  * does anything else that results in unbalanced calls to the irq_enter()
811  * and irq_exit() functions), RCU will give you what you deserve, good
812  * and hard.  But very infrequently and irreproducibly.
813  *
814  * Use things like work queues to work around this limitation.
815  *
816  * You have been warned.
817  */
818 void rcu_irq_enter(void)
819 {
820 	unsigned long flags;
821 	struct rcu_dynticks *rdtp;
822 	long long oldval;
823 
824 	local_irq_save(flags);
825 	rdtp = this_cpu_ptr(&rcu_dynticks);
826 	oldval = rdtp->dynticks_nesting;
827 	rdtp->dynticks_nesting++;
828 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
829 	if (oldval)
830 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
831 	else
832 		rcu_eqs_exit_common(oldval, true);
833 	rcu_sysidle_exit(1);
834 	local_irq_restore(flags);
835 }
836 
837 /**
838  * rcu_nmi_enter - inform RCU of entry to NMI context
839  *
840  * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
841  * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
842  * that the CPU is active.  This implementation permits nested NMIs, as
843  * long as the nesting level does not overflow an int.  (You will probably
844  * run out of stack space first.)
845  */
846 void rcu_nmi_enter(void)
847 {
848 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
849 	int incby = 2;
850 
851 	/* Complain about underflow. */
852 	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
853 
854 	/*
855 	 * If idle from RCU viewpoint, atomically increment ->dynticks
856 	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
857 	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
858 	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
859 	 * to be in the outermost NMI handler that interrupted an RCU-idle
860 	 * period (observation due to Andy Lutomirski).
861 	 */
862 	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
863 		smp_mb__before_atomic();  /* Force delay from prior write. */
864 		atomic_inc(&rdtp->dynticks);
865 		/* atomic_inc() before later RCU read-side crit sects */
866 		smp_mb__after_atomic();  /* See above. */
867 		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
868 		incby = 1;
869 	}
870 	rdtp->dynticks_nmi_nesting += incby;
871 	barrier();
872 }
873 
874 /**
875  * rcu_nmi_exit - inform RCU of exit from NMI context
876  *
877  * If we are returning from the outermost NMI handler that interrupted an
878  * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
879  * to let the RCU grace-period handling know that the CPU is back to
880  * being RCU-idle.
881  */
882 void rcu_nmi_exit(void)
883 {
884 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
885 
886 	/*
887 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
888 	 * (We are exiting an NMI handler, so RCU better be paying attention
889 	 * to us!)
890 	 */
891 	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
892 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
893 
894 	/*
895 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
896 	 * leave it in non-RCU-idle state.
897 	 */
898 	if (rdtp->dynticks_nmi_nesting != 1) {
899 		rdtp->dynticks_nmi_nesting -= 2;
900 		return;
901 	}
902 
903 	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
904 	rdtp->dynticks_nmi_nesting = 0;
905 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
906 	smp_mb__before_atomic();  /* See above. */
907 	atomic_inc(&rdtp->dynticks);
908 	smp_mb__after_atomic();  /* Force delay to next write. */
909 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
910 }
911 
912 /**
913  * __rcu_is_watching - are RCU read-side critical sections safe?
914  *
915  * Return true if RCU is watching the running CPU, which means that
916  * this CPU can safely enter RCU read-side critical sections.  Unlike
917  * rcu_is_watching(), the caller of __rcu_is_watching() must have at
918  * least disabled preemption.
919  */
920 bool notrace __rcu_is_watching(void)
921 {
922 	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
923 }
924 
925 /**
926  * rcu_is_watching - see if RCU thinks that the current CPU is idle
927  *
928  * If the current CPU is in its idle loop and is neither in an interrupt
929  * or NMI handler, return true.
930  */
931 bool notrace rcu_is_watching(void)
932 {
933 	bool ret;
934 
935 	preempt_disable();
936 	ret = __rcu_is_watching();
937 	preempt_enable();
938 	return ret;
939 }
940 EXPORT_SYMBOL_GPL(rcu_is_watching);
941 
942 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
943 
944 /*
945  * Is the current CPU online?  Disable preemption to avoid false positives
946  * that could otherwise happen due to the current CPU number being sampled,
947  * this task being preempted, its old CPU being taken offline, resuming
948  * on some other CPU, then determining that its old CPU is now offline.
949  * It is OK to use RCU on an offline processor during initial boot, hence
950  * the check for rcu_scheduler_fully_active.  Note also that it is OK
951  * for a CPU coming online to use RCU for one jiffy prior to marking itself
952  * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
953  * offline to continue to use RCU for one jiffy after marking itself
954  * offline in the cpu_online_mask.  This leniency is necessary given the
955  * non-atomic nature of the online and offline processing, for example,
956  * the fact that a CPU enters the scheduler after completing the CPU_DYING
957  * notifiers.
958  *
959  * This is also why RCU internally marks CPUs online during the
960  * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
961  *
962  * Disable checking if in an NMI handler because we cannot safely report
963  * errors from NMI handlers anyway.
964  */
965 bool rcu_lockdep_current_cpu_online(void)
966 {
967 	struct rcu_data *rdp;
968 	struct rcu_node *rnp;
969 	bool ret;
970 
971 	if (in_nmi())
972 		return true;
973 	preempt_disable();
974 	rdp = this_cpu_ptr(&rcu_sched_data);
975 	rnp = rdp->mynode;
976 	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
977 	      !rcu_scheduler_fully_active;
978 	preempt_enable();
979 	return ret;
980 }
981 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
982 
983 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
984 
985 /**
986  * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
987  *
988  * If the current CPU is idle or running at a first-level (not nested)
989  * interrupt from idle, return true.  The caller must have at least
990  * disabled preemption.
991  */
992 static int rcu_is_cpu_rrupt_from_idle(void)
993 {
994 	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
995 }
996 
997 /*
998  * Snapshot the specified CPU's dynticks counter so that we can later
999  * credit them with an implicit quiescent state.  Return 1 if this CPU
1000  * is in dynticks idle mode, which is an extended quiescent state.
1001  */
1002 static int dyntick_save_progress_counter(struct rcu_data *rdp,
1003 					 bool *isidle, unsigned long *maxj)
1004 {
1005 	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
1006 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
1007 	if ((rdp->dynticks_snap & 0x1) == 0) {
1008 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1009 		return 1;
1010 	} else {
1011 		if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
1012 				 rdp->mynode->gpnum))
1013 			ACCESS_ONCE(rdp->gpwrap) = true;
1014 		return 0;
1015 	}
1016 }
1017 
1018 /*
1019  * Return true if the specified CPU has passed through a quiescent
1020  * state by virtue of being in or having passed through an dynticks
1021  * idle state since the last call to dyntick_save_progress_counter()
1022  * for this same CPU, or by virtue of having been offline.
1023  */
1024 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
1025 				    bool *isidle, unsigned long *maxj)
1026 {
1027 	unsigned int curr;
1028 	int *rcrmp;
1029 	unsigned int snap;
1030 
1031 	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
1032 	snap = (unsigned int)rdp->dynticks_snap;
1033 
1034 	/*
1035 	 * If the CPU passed through or entered a dynticks idle phase with
1036 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
1037 	 * already acknowledged the request to pass through a quiescent
1038 	 * state.  Either way, that CPU cannot possibly be in an RCU
1039 	 * read-side critical section that started before the beginning
1040 	 * of the current RCU grace period.
1041 	 */
1042 	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
1043 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1044 		rdp->dynticks_fqs++;
1045 		return 1;
1046 	}
1047 
1048 	/*
1049 	 * Check for the CPU being offline, but only if the grace period
1050 	 * is old enough.  We don't need to worry about the CPU changing
1051 	 * state: If we see it offline even once, it has been through a
1052 	 * quiescent state.
1053 	 *
1054 	 * The reason for insisting that the grace period be at least
1055 	 * one jiffy old is that CPUs that are not quite online and that
1056 	 * have just gone offline can still execute RCU read-side critical
1057 	 * sections.
1058 	 */
1059 	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
1060 		return 0;  /* Grace period is not old enough. */
1061 	barrier();
1062 	if (cpu_is_offline(rdp->cpu)) {
1063 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
1064 		rdp->offline_fqs++;
1065 		return 1;
1066 	}
1067 
1068 	/*
1069 	 * A CPU running for an extended time within the kernel can
1070 	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
1071 	 * even context-switching back and forth between a pair of
1072 	 * in-kernel CPU-bound tasks cannot advance grace periods.
1073 	 * So if the grace period is old enough, make the CPU pay attention.
1074 	 * Note that the unsynchronized assignments to the per-CPU
1075 	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
1076 	 * bits can be lost, but they will be set again on the next
1077 	 * force-quiescent-state pass.  So lost bit sets do not result
1078 	 * in incorrect behavior, merely in a grace period lasting
1079 	 * a few jiffies longer than it might otherwise.  Because
1080 	 * there are at most four threads involved, and because the
1081 	 * updates are only once every few jiffies, the probability of
1082 	 * lossage (and thus of slight grace-period extension) is
1083 	 * quite low.
1084 	 *
1085 	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
1086 	 * is set too high, we override with half of the RCU CPU stall
1087 	 * warning delay.
1088 	 */
1089 	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
1090 	if (ULONG_CMP_GE(jiffies,
1091 			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
1092 	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
1093 		if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
1094 			ACCESS_ONCE(rdp->cond_resched_completed) =
1095 				ACCESS_ONCE(rdp->mynode->completed);
1096 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
1097 			ACCESS_ONCE(*rcrmp) =
1098 				ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
1099 			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
1100 			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
1101 		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
1102 			/* Time to beat on that CPU again! */
1103 			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
1104 			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
1105 		}
1106 	}
1107 
1108 	return 0;
1109 }
1110 
1111 static void record_gp_stall_check_time(struct rcu_state *rsp)
1112 {
1113 	unsigned long j = jiffies;
1114 	unsigned long j1;
1115 
1116 	rsp->gp_start = j;
1117 	smp_wmb(); /* Record start time before stall time. */
1118 	j1 = rcu_jiffies_till_stall_check();
1119 	ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
1120 	rsp->jiffies_resched = j + j1 / 2;
1121 	rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
1122 }
1123 
1124 /*
1125  * Complain about starvation of grace-period kthread.
1126  */
1127 static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1128 {
1129 	unsigned long gpa;
1130 	unsigned long j;
1131 
1132 	j = jiffies;
1133 	gpa = ACCESS_ONCE(rsp->gp_activity);
1134 	if (j - gpa > 2 * HZ)
1135 		pr_err("%s kthread starved for %ld jiffies!\n",
1136 		       rsp->name, j - gpa);
1137 }
1138 
1139 /*
1140  * Dump stacks of all tasks running on stalled CPUs.
1141  */
1142 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1143 {
1144 	int cpu;
1145 	unsigned long flags;
1146 	struct rcu_node *rnp;
1147 
1148 	rcu_for_each_leaf_node(rsp, rnp) {
1149 		raw_spin_lock_irqsave(&rnp->lock, flags);
1150 		if (rnp->qsmask != 0) {
1151 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1152 				if (rnp->qsmask & (1UL << cpu))
1153 					dump_cpu_task(rnp->grplo + cpu);
1154 		}
1155 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1156 	}
1157 }
1158 
1159 static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1160 {
1161 	int cpu;
1162 	long delta;
1163 	unsigned long flags;
1164 	unsigned long gpa;
1165 	unsigned long j;
1166 	int ndetected = 0;
1167 	struct rcu_node *rnp = rcu_get_root(rsp);
1168 	long totqlen = 0;
1169 
1170 	/* Only let one CPU complain about others per time interval. */
1171 
1172 	raw_spin_lock_irqsave(&rnp->lock, flags);
1173 	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
1174 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1175 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1176 		return;
1177 	}
1178 	ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
1179 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1180 
1181 	/*
1182 	 * OK, time to rat on our buddy...
1183 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
1184 	 * RCU CPU stall warnings.
1185 	 */
1186 	pr_err("INFO: %s detected stalls on CPUs/tasks:",
1187 	       rsp->name);
1188 	print_cpu_stall_info_begin();
1189 	rcu_for_each_leaf_node(rsp, rnp) {
1190 		raw_spin_lock_irqsave(&rnp->lock, flags);
1191 		ndetected += rcu_print_task_stall(rnp);
1192 		if (rnp->qsmask != 0) {
1193 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1194 				if (rnp->qsmask & (1UL << cpu)) {
1195 					print_cpu_stall_info(rsp,
1196 							     rnp->grplo + cpu);
1197 					ndetected++;
1198 				}
1199 		}
1200 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1201 	}
1202 
1203 	print_cpu_stall_info_end();
1204 	for_each_possible_cpu(cpu)
1205 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1206 	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1207 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
1208 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
1209 	if (ndetected) {
1210 		rcu_dump_cpu_stacks(rsp);
1211 	} else {
1212 		if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
1213 		    ACCESS_ONCE(rsp->completed) == gpnum) {
1214 			pr_err("INFO: Stall ended before state dump start\n");
1215 		} else {
1216 			j = jiffies;
1217 			gpa = ACCESS_ONCE(rsp->gp_activity);
1218 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
1219 			       rsp->name, j - gpa, j, gpa,
1220 			       jiffies_till_next_fqs,
1221 			       rcu_get_root(rsp)->qsmask);
1222 			/* In this case, the current CPU might be at fault. */
1223 			sched_show_task(current);
1224 		}
1225 	}
1226 
1227 	/* Complain about tasks blocking the grace period. */
1228 	rcu_print_detail_task_stall(rsp);
1229 
1230 	rcu_check_gp_kthread_starvation(rsp);
1231 
1232 	force_quiescent_state(rsp);  /* Kick them all. */
1233 }
1234 
1235 static void print_cpu_stall(struct rcu_state *rsp)
1236 {
1237 	int cpu;
1238 	unsigned long flags;
1239 	struct rcu_node *rnp = rcu_get_root(rsp);
1240 	long totqlen = 0;
1241 
1242 	/*
1243 	 * OK, time to rat on ourselves...
1244 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
1245 	 * RCU CPU stall warnings.
1246 	 */
1247 	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1248 	print_cpu_stall_info_begin();
1249 	print_cpu_stall_info(rsp, smp_processor_id());
1250 	print_cpu_stall_info_end();
1251 	for_each_possible_cpu(cpu)
1252 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1253 	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1254 		jiffies - rsp->gp_start,
1255 		(long)rsp->gpnum, (long)rsp->completed, totqlen);
1256 
1257 	rcu_check_gp_kthread_starvation(rsp);
1258 
1259 	rcu_dump_cpu_stacks(rsp);
1260 
1261 	raw_spin_lock_irqsave(&rnp->lock, flags);
1262 	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
1263 		ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
1264 				     3 * rcu_jiffies_till_stall_check() + 3;
1265 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1266 
1267 	/*
1268 	 * Attempt to revive the RCU machinery by forcing a context switch.
1269 	 *
1270 	 * A context switch would normally allow the RCU state machine to make
1271 	 * progress and it could be we're stuck in kernel space without context
1272 	 * switches for an entirely unreasonable amount of time.
1273 	 */
1274 	resched_cpu(smp_processor_id());
1275 }
1276 
1277 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1278 {
1279 	unsigned long completed;
1280 	unsigned long gpnum;
1281 	unsigned long gps;
1282 	unsigned long j;
1283 	unsigned long js;
1284 	struct rcu_node *rnp;
1285 
1286 	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
1287 		return;
1288 	j = jiffies;
1289 
1290 	/*
1291 	 * Lots of memory barriers to reject false positives.
1292 	 *
1293 	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1294 	 * then rsp->gp_start, and finally rsp->completed.  These values
1295 	 * are updated in the opposite order with memory barriers (or
1296 	 * equivalent) during grace-period initialization and cleanup.
1297 	 * Now, a false positive can occur if we get an new value of
1298 	 * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
1299 	 * the memory barriers, the only way that this can happen is if one
1300 	 * grace period ends and another starts between these two fetches.
1301 	 * Detect this by comparing rsp->completed with the previous fetch
1302 	 * from rsp->gpnum.
1303 	 *
1304 	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
1305 	 * and rsp->gp_start suffice to forestall false positives.
1306 	 */
1307 	gpnum = ACCESS_ONCE(rsp->gpnum);
1308 	smp_rmb(); /* Pick up ->gpnum first... */
1309 	js = ACCESS_ONCE(rsp->jiffies_stall);
1310 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
1311 	gps = ACCESS_ONCE(rsp->gp_start);
1312 	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
1313 	completed = ACCESS_ONCE(rsp->completed);
1314 	if (ULONG_CMP_GE(completed, gpnum) ||
1315 	    ULONG_CMP_LT(j, js) ||
1316 	    ULONG_CMP_GE(gps, js))
1317 		return; /* No stall or GP completed since entering function. */
1318 	rnp = rdp->mynode;
1319 	if (rcu_gp_in_progress(rsp) &&
1320 	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
1321 
1322 		/* We haven't checked in, so go dump stack. */
1323 		print_cpu_stall(rsp);
1324 
1325 	} else if (rcu_gp_in_progress(rsp) &&
1326 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1327 
1328 		/* They had a few time units to dump stack, so complain. */
1329 		print_other_cpu_stall(rsp, gpnum);
1330 	}
1331 }
1332 
1333 /**
1334  * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
1335  *
1336  * Set the stall-warning timeout way off into the future, thus preventing
1337  * any RCU CPU stall-warning messages from appearing in the current set of
1338  * RCU grace periods.
1339  *
1340  * The caller must disable hard irqs.
1341  */
1342 void rcu_cpu_stall_reset(void)
1343 {
1344 	struct rcu_state *rsp;
1345 
1346 	for_each_rcu_flavor(rsp)
1347 		ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
1348 }
1349 
1350 /*
1351  * Initialize the specified rcu_data structure's default callback list
1352  * to empty.  The default callback list is the one that is not used by
1353  * no-callbacks CPUs.
1354  */
1355 static void init_default_callback_list(struct rcu_data *rdp)
1356 {
1357 	int i;
1358 
1359 	rdp->nxtlist = NULL;
1360 	for (i = 0; i < RCU_NEXT_SIZE; i++)
1361 		rdp->nxttail[i] = &rdp->nxtlist;
1362 }
1363 
1364 /*
1365  * Initialize the specified rcu_data structure's callback list to empty.
1366  */
1367 static void init_callback_list(struct rcu_data *rdp)
1368 {
1369 	if (init_nocb_callback_list(rdp))
1370 		return;
1371 	init_default_callback_list(rdp);
1372 }
1373 
1374 /*
1375  * Determine the value that ->completed will have at the end of the
1376  * next subsequent grace period.  This is used to tag callbacks so that
1377  * a CPU can invoke callbacks in a timely fashion even if that CPU has
1378  * been dyntick-idle for an extended period with callbacks under the
1379  * influence of RCU_FAST_NO_HZ.
1380  *
1381  * The caller must hold rnp->lock with interrupts disabled.
1382  */
1383 static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1384 				       struct rcu_node *rnp)
1385 {
1386 	/*
1387 	 * If RCU is idle, we just wait for the next grace period.
1388 	 * But we can only be sure that RCU is idle if we are looking
1389 	 * at the root rcu_node structure -- otherwise, a new grace
1390 	 * period might have started, but just not yet gotten around
1391 	 * to initializing the current non-root rcu_node structure.
1392 	 */
1393 	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1394 		return rnp->completed + 1;
1395 
1396 	/*
1397 	 * Otherwise, wait for a possible partial grace period and
1398 	 * then the subsequent full grace period.
1399 	 */
1400 	return rnp->completed + 2;
1401 }
1402 
1403 /*
1404  * Trace-event helper function for rcu_start_future_gp() and
1405  * rcu_nocb_wait_gp().
1406  */
1407 static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1408 				unsigned long c, const char *s)
1409 {
1410 	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1411 				      rnp->completed, c, rnp->level,
1412 				      rnp->grplo, rnp->grphi, s);
1413 }
1414 
1415 /*
1416  * Start some future grace period, as needed to handle newly arrived
1417  * callbacks.  The required future grace periods are recorded in each
1418  * rcu_node structure's ->need_future_gp field.  Returns true if there
1419  * is reason to awaken the grace-period kthread.
1420  *
1421  * The caller must hold the specified rcu_node structure's ->lock.
1422  */
1423 static bool __maybe_unused
1424 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1425 		    unsigned long *c_out)
1426 {
1427 	unsigned long c;
1428 	int i;
1429 	bool ret = false;
1430 	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1431 
1432 	/*
1433 	 * Pick up grace-period number for new callbacks.  If this
1434 	 * grace period is already marked as needed, return to the caller.
1435 	 */
1436 	c = rcu_cbs_completed(rdp->rsp, rnp);
1437 	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1438 	if (rnp->need_future_gp[c & 0x1]) {
1439 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1440 		goto out;
1441 	}
1442 
1443 	/*
1444 	 * If either this rcu_node structure or the root rcu_node structure
1445 	 * believe that a grace period is in progress, then we must wait
1446 	 * for the one following, which is in "c".  Because our request
1447 	 * will be noticed at the end of the current grace period, we don't
1448 	 * need to explicitly start one.  We only do the lockless check
1449 	 * of rnp_root's fields if the current rcu_node structure thinks
1450 	 * there is no grace period in flight, and because we hold rnp->lock,
1451 	 * the only possible change is when rnp_root's two fields are
1452 	 * equal, in which case rnp_root->gpnum might be concurrently
1453 	 * incremented.  But that is OK, as it will just result in our
1454 	 * doing some extra useless work.
1455 	 */
1456 	if (rnp->gpnum != rnp->completed ||
1457 	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
1458 		rnp->need_future_gp[c & 0x1]++;
1459 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1460 		goto out;
1461 	}
1462 
1463 	/*
1464 	 * There might be no grace period in progress.  If we don't already
1465 	 * hold it, acquire the root rcu_node structure's lock in order to
1466 	 * start one (if needed).
1467 	 */
1468 	if (rnp != rnp_root) {
1469 		raw_spin_lock(&rnp_root->lock);
1470 		smp_mb__after_unlock_lock();
1471 	}
1472 
1473 	/*
1474 	 * Get a new grace-period number.  If there really is no grace
1475 	 * period in progress, it will be smaller than the one we obtained
1476 	 * earlier.  Adjust callbacks as needed.  Note that even no-CBs
1477 	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1478 	 */
1479 	c = rcu_cbs_completed(rdp->rsp, rnp_root);
1480 	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1481 		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1482 			rdp->nxtcompleted[i] = c;
1483 
1484 	/*
1485 	 * If the needed for the required grace period is already
1486 	 * recorded, trace and leave.
1487 	 */
1488 	if (rnp_root->need_future_gp[c & 0x1]) {
1489 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1490 		goto unlock_out;
1491 	}
1492 
1493 	/* Record the need for the future grace period. */
1494 	rnp_root->need_future_gp[c & 0x1]++;
1495 
1496 	/* If a grace period is not already in progress, start one. */
1497 	if (rnp_root->gpnum != rnp_root->completed) {
1498 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1499 	} else {
1500 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1501 		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1502 	}
1503 unlock_out:
1504 	if (rnp != rnp_root)
1505 		raw_spin_unlock(&rnp_root->lock);
1506 out:
1507 	if (c_out != NULL)
1508 		*c_out = c;
1509 	return ret;
1510 }
1511 
1512 /*
1513  * Clean up any old requests for the just-ended grace period.  Also return
1514  * whether any additional grace periods have been requested.  Also invoke
1515  * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1516  * waiting for this grace period to complete.
1517  */
1518 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1519 {
1520 	int c = rnp->completed;
1521 	int needmore;
1522 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1523 
1524 	rcu_nocb_gp_cleanup(rsp, rnp);
1525 	rnp->need_future_gp[c & 0x1] = 0;
1526 	needmore = rnp->need_future_gp[(c + 1) & 0x1];
1527 	trace_rcu_future_gp(rnp, rdp, c,
1528 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1529 	return needmore;
1530 }
1531 
1532 /*
1533  * Awaken the grace-period kthread for the specified flavor of RCU.
1534  * Don't do a self-awaken, and don't bother awakening when there is
1535  * nothing for the grace-period kthread to do (as in several CPUs
1536  * raced to awaken, and we lost), and finally don't try to awaken
1537  * a kthread that has not yet been created.
1538  */
1539 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1540 {
1541 	if (current == rsp->gp_kthread ||
1542 	    !ACCESS_ONCE(rsp->gp_flags) ||
1543 	    !rsp->gp_kthread)
1544 		return;
1545 	wake_up(&rsp->gp_wq);
1546 }
1547 
1548 /*
1549  * If there is room, assign a ->completed number to any callbacks on
1550  * this CPU that have not already been assigned.  Also accelerate any
1551  * callbacks that were previously assigned a ->completed number that has
1552  * since proven to be too conservative, which can happen if callbacks get
1553  * assigned a ->completed number while RCU is idle, but with reference to
1554  * a non-root rcu_node structure.  This function is idempotent, so it does
1555  * not hurt to call it repeatedly.  Returns an flag saying that we should
1556  * awaken the RCU grace-period kthread.
1557  *
1558  * The caller must hold rnp->lock with interrupts disabled.
1559  */
1560 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1561 			       struct rcu_data *rdp)
1562 {
1563 	unsigned long c;
1564 	int i;
1565 	bool ret;
1566 
1567 	/* If the CPU has no callbacks, nothing to do. */
1568 	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1569 		return false;
1570 
1571 	/*
1572 	 * Starting from the sublist containing the callbacks most
1573 	 * recently assigned a ->completed number and working down, find the
1574 	 * first sublist that is not assignable to an upcoming grace period.
1575 	 * Such a sublist has something in it (first two tests) and has
1576 	 * a ->completed number assigned that will complete sooner than
1577 	 * the ->completed number for newly arrived callbacks (last test).
1578 	 *
1579 	 * The key point is that any later sublist can be assigned the
1580 	 * same ->completed number as the newly arrived callbacks, which
1581 	 * means that the callbacks in any of these later sublist can be
1582 	 * grouped into a single sublist, whether or not they have already
1583 	 * been assigned a ->completed number.
1584 	 */
1585 	c = rcu_cbs_completed(rsp, rnp);
1586 	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1587 		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1588 		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1589 			break;
1590 
1591 	/*
1592 	 * If there are no sublist for unassigned callbacks, leave.
1593 	 * At the same time, advance "i" one sublist, so that "i" will
1594 	 * index into the sublist where all the remaining callbacks should
1595 	 * be grouped into.
1596 	 */
1597 	if (++i >= RCU_NEXT_TAIL)
1598 		return false;
1599 
1600 	/*
1601 	 * Assign all subsequent callbacks' ->completed number to the next
1602 	 * full grace period and group them all in the sublist initially
1603 	 * indexed by "i".
1604 	 */
1605 	for (; i <= RCU_NEXT_TAIL; i++) {
1606 		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1607 		rdp->nxtcompleted[i] = c;
1608 	}
1609 	/* Record any needed additional grace periods. */
1610 	ret = rcu_start_future_gp(rnp, rdp, NULL);
1611 
1612 	/* Trace depending on how much we were able to accelerate. */
1613 	if (!*rdp->nxttail[RCU_WAIT_TAIL])
1614 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1615 	else
1616 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1617 	return ret;
1618 }
1619 
1620 /*
1621  * Move any callbacks whose grace period has completed to the
1622  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1623  * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1624  * sublist.  This function is idempotent, so it does not hurt to
1625  * invoke it repeatedly.  As long as it is not invoked -too- often...
1626  * Returns true if the RCU grace-period kthread needs to be awakened.
1627  *
1628  * The caller must hold rnp->lock with interrupts disabled.
1629  */
1630 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1631 			    struct rcu_data *rdp)
1632 {
1633 	int i, j;
1634 
1635 	/* If the CPU has no callbacks, nothing to do. */
1636 	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1637 		return false;
1638 
1639 	/*
1640 	 * Find all callbacks whose ->completed numbers indicate that they
1641 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1642 	 */
1643 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1644 		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1645 			break;
1646 		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1647 	}
1648 	/* Clean up any sublist tail pointers that were misordered above. */
1649 	for (j = RCU_WAIT_TAIL; j < i; j++)
1650 		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1651 
1652 	/* Copy down callbacks to fill in empty sublists. */
1653 	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1654 		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1655 			break;
1656 		rdp->nxttail[j] = rdp->nxttail[i];
1657 		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1658 	}
1659 
1660 	/* Classify any remaining callbacks. */
1661 	return rcu_accelerate_cbs(rsp, rnp, rdp);
1662 }
1663 
1664 /*
1665  * Update CPU-local rcu_data state to record the beginnings and ends of
1666  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1667  * structure corresponding to the current CPU, and must have irqs disabled.
1668  * Returns true if the grace-period kthread needs to be awakened.
1669  */
1670 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1671 			      struct rcu_data *rdp)
1672 {
1673 	bool ret;
1674 
1675 	/* Handle the ends of any preceding grace periods first. */
1676 	if (rdp->completed == rnp->completed &&
1677 	    !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1678 
1679 		/* No grace period end, so just accelerate recent callbacks. */
1680 		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1681 
1682 	} else {
1683 
1684 		/* Advance callbacks. */
1685 		ret = rcu_advance_cbs(rsp, rnp, rdp);
1686 
1687 		/* Remember that we saw this grace-period completion. */
1688 		rdp->completed = rnp->completed;
1689 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1690 	}
1691 
1692 	if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
1693 		/*
1694 		 * If the current grace period is waiting for this CPU,
1695 		 * set up to detect a quiescent state, otherwise don't
1696 		 * go looking for one.
1697 		 */
1698 		rdp->gpnum = rnp->gpnum;
1699 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1700 		rdp->passed_quiesce = 0;
1701 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1702 		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1703 		zero_cpu_stall_ticks(rdp);
1704 		ACCESS_ONCE(rdp->gpwrap) = false;
1705 	}
1706 	return ret;
1707 }
1708 
1709 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1710 {
1711 	unsigned long flags;
1712 	bool needwake;
1713 	struct rcu_node *rnp;
1714 
1715 	local_irq_save(flags);
1716 	rnp = rdp->mynode;
1717 	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1718 	     rdp->completed == ACCESS_ONCE(rnp->completed) &&
1719 	     !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
1720 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1721 		local_irq_restore(flags);
1722 		return;
1723 	}
1724 	smp_mb__after_unlock_lock();
1725 	needwake = __note_gp_changes(rsp, rnp, rdp);
1726 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1727 	if (needwake)
1728 		rcu_gp_kthread_wake(rsp);
1729 }
1730 
1731 /*
1732  * Initialize a new grace period.  Return 0 if no grace period required.
1733  */
1734 static int rcu_gp_init(struct rcu_state *rsp)
1735 {
1736 	unsigned long oldmask;
1737 	struct rcu_data *rdp;
1738 	struct rcu_node *rnp = rcu_get_root(rsp);
1739 
1740 	ACCESS_ONCE(rsp->gp_activity) = jiffies;
1741 	raw_spin_lock_irq(&rnp->lock);
1742 	smp_mb__after_unlock_lock();
1743 	if (!ACCESS_ONCE(rsp->gp_flags)) {
1744 		/* Spurious wakeup, tell caller to go back to sleep.  */
1745 		raw_spin_unlock_irq(&rnp->lock);
1746 		return 0;
1747 	}
1748 	ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
1749 
1750 	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1751 		/*
1752 		 * Grace period already in progress, don't start another.
1753 		 * Not supposed to be able to happen.
1754 		 */
1755 		raw_spin_unlock_irq(&rnp->lock);
1756 		return 0;
1757 	}
1758 
1759 	/* Advance to a new grace period and initialize state. */
1760 	record_gp_stall_check_time(rsp);
1761 	/* Record GP times before starting GP, hence smp_store_release(). */
1762 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1763 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1764 	raw_spin_unlock_irq(&rnp->lock);
1765 
1766 	/*
1767 	 * Apply per-leaf buffered online and offline operations to the
1768 	 * rcu_node tree.  Note that this new grace period need not wait
1769 	 * for subsequent online CPUs, and that quiescent-state forcing
1770 	 * will handle subsequent offline CPUs.
1771 	 */
1772 	rcu_for_each_leaf_node(rsp, rnp) {
1773 		raw_spin_lock_irq(&rnp->lock);
1774 		smp_mb__after_unlock_lock();
1775 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1776 		    !rnp->wait_blkd_tasks) {
1777 			/* Nothing to do on this leaf rcu_node structure. */
1778 			raw_spin_unlock_irq(&rnp->lock);
1779 			continue;
1780 		}
1781 
1782 		/* Record old state, apply changes to ->qsmaskinit field. */
1783 		oldmask = rnp->qsmaskinit;
1784 		rnp->qsmaskinit = rnp->qsmaskinitnext;
1785 
1786 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1787 		if (!oldmask != !rnp->qsmaskinit) {
1788 			if (!oldmask) /* First online CPU for this rcu_node. */
1789 				rcu_init_new_rnp(rnp);
1790 			else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
1791 				rnp->wait_blkd_tasks = true;
1792 			else /* Last offline CPU and can propagate. */
1793 				rcu_cleanup_dead_rnp(rnp);
1794 		}
1795 
1796 		/*
1797 		 * If all waited-on tasks from prior grace period are
1798 		 * done, and if all this rcu_node structure's CPUs are
1799 		 * still offline, propagate up the rcu_node tree and
1800 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1801 		 * rcu_node structure's CPUs has since come back online,
1802 		 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
1803 		 * checks for this, so just call it unconditionally).
1804 		 */
1805 		if (rnp->wait_blkd_tasks &&
1806 		    (!rcu_preempt_has_tasks(rnp) ||
1807 		     rnp->qsmaskinit)) {
1808 			rnp->wait_blkd_tasks = false;
1809 			rcu_cleanup_dead_rnp(rnp);
1810 		}
1811 
1812 		raw_spin_unlock_irq(&rnp->lock);
1813 	}
1814 
1815 	/*
1816 	 * Set the quiescent-state-needed bits in all the rcu_node
1817 	 * structures for all currently online CPUs in breadth-first order,
1818 	 * starting from the root rcu_node structure, relying on the layout
1819 	 * of the tree within the rsp->node[] array.  Note that other CPUs
1820 	 * will access only the leaves of the hierarchy, thus seeing that no
1821 	 * grace period is in progress, at least until the corresponding
1822 	 * leaf node has been initialized.  In addition, we have excluded
1823 	 * CPU-hotplug operations.
1824 	 *
1825 	 * The grace period cannot complete until the initialization
1826 	 * process finishes, because this kthread handles both.
1827 	 */
1828 	rcu_for_each_node_breadth_first(rsp, rnp) {
1829 		raw_spin_lock_irq(&rnp->lock);
1830 		smp_mb__after_unlock_lock();
1831 		rdp = this_cpu_ptr(rsp->rda);
1832 		rcu_preempt_check_blocked_tasks(rnp);
1833 		rnp->qsmask = rnp->qsmaskinit;
1834 		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
1835 		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
1836 			ACCESS_ONCE(rnp->completed) = rsp->completed;
1837 		if (rnp == rdp->mynode)
1838 			(void)__note_gp_changes(rsp, rnp, rdp);
1839 		rcu_preempt_boost_start_gp(rnp);
1840 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1841 					    rnp->level, rnp->grplo,
1842 					    rnp->grphi, rnp->qsmask);
1843 		raw_spin_unlock_irq(&rnp->lock);
1844 		cond_resched_rcu_qs();
1845 		ACCESS_ONCE(rsp->gp_activity) = jiffies;
1846 		if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) &&
1847 		    gp_init_delay > 0 &&
1848 		    !(rsp->gpnum % (rcu_num_nodes * 10)))
1849 			schedule_timeout_uninterruptible(gp_init_delay);
1850 	}
1851 
1852 	return 1;
1853 }
1854 
1855 /*
1856  * Do one round of quiescent-state forcing.
1857  */
1858 static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1859 {
1860 	int fqs_state = fqs_state_in;
1861 	bool isidle = false;
1862 	unsigned long maxj;
1863 	struct rcu_node *rnp = rcu_get_root(rsp);
1864 
1865 	ACCESS_ONCE(rsp->gp_activity) = jiffies;
1866 	rsp->n_force_qs++;
1867 	if (fqs_state == RCU_SAVE_DYNTICK) {
1868 		/* Collect dyntick-idle snapshots. */
1869 		if (is_sysidle_rcu_state(rsp)) {
1870 			isidle = true;
1871 			maxj = jiffies - ULONG_MAX / 4;
1872 		}
1873 		force_qs_rnp(rsp, dyntick_save_progress_counter,
1874 			     &isidle, &maxj);
1875 		rcu_sysidle_report_gp(rsp, isidle, maxj);
1876 		fqs_state = RCU_FORCE_QS;
1877 	} else {
1878 		/* Handle dyntick-idle and offline CPUs. */
1879 		isidle = true;
1880 		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
1881 	}
1882 	/* Clear flag to prevent immediate re-entry. */
1883 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
1884 		raw_spin_lock_irq(&rnp->lock);
1885 		smp_mb__after_unlock_lock();
1886 		ACCESS_ONCE(rsp->gp_flags) =
1887 			ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
1888 		raw_spin_unlock_irq(&rnp->lock);
1889 	}
1890 	return fqs_state;
1891 }
1892 
1893 /*
1894  * Clean up after the old grace period.
1895  */
1896 static void rcu_gp_cleanup(struct rcu_state *rsp)
1897 {
1898 	unsigned long gp_duration;
1899 	bool needgp = false;
1900 	int nocb = 0;
1901 	struct rcu_data *rdp;
1902 	struct rcu_node *rnp = rcu_get_root(rsp);
1903 
1904 	ACCESS_ONCE(rsp->gp_activity) = jiffies;
1905 	raw_spin_lock_irq(&rnp->lock);
1906 	smp_mb__after_unlock_lock();
1907 	gp_duration = jiffies - rsp->gp_start;
1908 	if (gp_duration > rsp->gp_max)
1909 		rsp->gp_max = gp_duration;
1910 
1911 	/*
1912 	 * We know the grace period is complete, but to everyone else
1913 	 * it appears to still be ongoing.  But it is also the case
1914 	 * that to everyone else it looks like there is nothing that
1915 	 * they can do to advance the grace period.  It is therefore
1916 	 * safe for us to drop the lock in order to mark the grace
1917 	 * period as completed in all of the rcu_node structures.
1918 	 */
1919 	raw_spin_unlock_irq(&rnp->lock);
1920 
1921 	/*
1922 	 * Propagate new ->completed value to rcu_node structures so
1923 	 * that other CPUs don't have to wait until the start of the next
1924 	 * grace period to process their callbacks.  This also avoids
1925 	 * some nasty RCU grace-period initialization races by forcing
1926 	 * the end of the current grace period to be completely recorded in
1927 	 * all of the rcu_node structures before the beginning of the next
1928 	 * grace period is recorded in any of the rcu_node structures.
1929 	 */
1930 	rcu_for_each_node_breadth_first(rsp, rnp) {
1931 		raw_spin_lock_irq(&rnp->lock);
1932 		smp_mb__after_unlock_lock();
1933 		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
1934 		WARN_ON_ONCE(rnp->qsmask);
1935 		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
1936 		rdp = this_cpu_ptr(rsp->rda);
1937 		if (rnp == rdp->mynode)
1938 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
1939 		/* smp_mb() provided by prior unlock-lock pair. */
1940 		nocb += rcu_future_gp_cleanup(rsp, rnp);
1941 		raw_spin_unlock_irq(&rnp->lock);
1942 		cond_resched_rcu_qs();
1943 		ACCESS_ONCE(rsp->gp_activity) = jiffies;
1944 	}
1945 	rnp = rcu_get_root(rsp);
1946 	raw_spin_lock_irq(&rnp->lock);
1947 	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
1948 	rcu_nocb_gp_set(rnp, nocb);
1949 
1950 	/* Declare grace period done. */
1951 	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
1952 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
1953 	rsp->fqs_state = RCU_GP_IDLE;
1954 	rdp = this_cpu_ptr(rsp->rda);
1955 	/* Advance CBs to reduce false positives below. */
1956 	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
1957 	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
1958 		ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
1959 		trace_rcu_grace_period(rsp->name,
1960 				       ACCESS_ONCE(rsp->gpnum),
1961 				       TPS("newreq"));
1962 	}
1963 	raw_spin_unlock_irq(&rnp->lock);
1964 }
1965 
1966 /*
1967  * Body of kthread that handles grace periods.
1968  */
1969 static int __noreturn rcu_gp_kthread(void *arg)
1970 {
1971 	int fqs_state;
1972 	int gf;
1973 	unsigned long j;
1974 	int ret;
1975 	struct rcu_state *rsp = arg;
1976 	struct rcu_node *rnp = rcu_get_root(rsp);
1977 
1978 	rcu_bind_gp_kthread();
1979 	for (;;) {
1980 
1981 		/* Handle grace-period start. */
1982 		for (;;) {
1983 			trace_rcu_grace_period(rsp->name,
1984 					       ACCESS_ONCE(rsp->gpnum),
1985 					       TPS("reqwait"));
1986 			rsp->gp_state = RCU_GP_WAIT_GPS;
1987 			wait_event_interruptible(rsp->gp_wq,
1988 						 ACCESS_ONCE(rsp->gp_flags) &
1989 						 RCU_GP_FLAG_INIT);
1990 			/* Locking provides needed memory barrier. */
1991 			if (rcu_gp_init(rsp))
1992 				break;
1993 			cond_resched_rcu_qs();
1994 			ACCESS_ONCE(rsp->gp_activity) = jiffies;
1995 			WARN_ON(signal_pending(current));
1996 			trace_rcu_grace_period(rsp->name,
1997 					       ACCESS_ONCE(rsp->gpnum),
1998 					       TPS("reqwaitsig"));
1999 		}
2000 
2001 		/* Handle quiescent-state forcing. */
2002 		fqs_state = RCU_SAVE_DYNTICK;
2003 		j = jiffies_till_first_fqs;
2004 		if (j > HZ) {
2005 			j = HZ;
2006 			jiffies_till_first_fqs = HZ;
2007 		}
2008 		ret = 0;
2009 		for (;;) {
2010 			if (!ret)
2011 				rsp->jiffies_force_qs = jiffies + j;
2012 			trace_rcu_grace_period(rsp->name,
2013 					       ACCESS_ONCE(rsp->gpnum),
2014 					       TPS("fqswait"));
2015 			rsp->gp_state = RCU_GP_WAIT_FQS;
2016 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
2017 					((gf = ACCESS_ONCE(rsp->gp_flags)) &
2018 					 RCU_GP_FLAG_FQS) ||
2019 					(!ACCESS_ONCE(rnp->qsmask) &&
2020 					 !rcu_preempt_blocked_readers_cgp(rnp)),
2021 					j);
2022 			/* Locking provides needed memory barriers. */
2023 			/* If grace period done, leave loop. */
2024 			if (!ACCESS_ONCE(rnp->qsmask) &&
2025 			    !rcu_preempt_blocked_readers_cgp(rnp))
2026 				break;
2027 			/* If time for quiescent-state forcing, do it. */
2028 			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
2029 			    (gf & RCU_GP_FLAG_FQS)) {
2030 				trace_rcu_grace_period(rsp->name,
2031 						       ACCESS_ONCE(rsp->gpnum),
2032 						       TPS("fqsstart"));
2033 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
2034 				trace_rcu_grace_period(rsp->name,
2035 						       ACCESS_ONCE(rsp->gpnum),
2036 						       TPS("fqsend"));
2037 				cond_resched_rcu_qs();
2038 				ACCESS_ONCE(rsp->gp_activity) = jiffies;
2039 			} else {
2040 				/* Deal with stray signal. */
2041 				cond_resched_rcu_qs();
2042 				ACCESS_ONCE(rsp->gp_activity) = jiffies;
2043 				WARN_ON(signal_pending(current));
2044 				trace_rcu_grace_period(rsp->name,
2045 						       ACCESS_ONCE(rsp->gpnum),
2046 						       TPS("fqswaitsig"));
2047 			}
2048 			j = jiffies_till_next_fqs;
2049 			if (j > HZ) {
2050 				j = HZ;
2051 				jiffies_till_next_fqs = HZ;
2052 			} else if (j < 1) {
2053 				j = 1;
2054 				jiffies_till_next_fqs = 1;
2055 			}
2056 		}
2057 
2058 		/* Handle grace-period end. */
2059 		rcu_gp_cleanup(rsp);
2060 	}
2061 }
2062 
2063 /*
2064  * Start a new RCU grace period if warranted, re-initializing the hierarchy
2065  * in preparation for detecting the next grace period.  The caller must hold
2066  * the root node's ->lock and hard irqs must be disabled.
2067  *
2068  * Note that it is legal for a dying CPU (which is marked as offline) to
2069  * invoke this function.  This can happen when the dying CPU reports its
2070  * quiescent state.
2071  *
2072  * Returns true if the grace-period kthread must be awakened.
2073  */
2074 static bool
2075 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2076 		      struct rcu_data *rdp)
2077 {
2078 	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
2079 		/*
2080 		 * Either we have not yet spawned the grace-period
2081 		 * task, this CPU does not need another grace period,
2082 		 * or a grace period is already in progress.
2083 		 * Either way, don't start a new grace period.
2084 		 */
2085 		return false;
2086 	}
2087 	ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
2088 	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
2089 			       TPS("newreq"));
2090 
2091 	/*
2092 	 * We can't do wakeups while holding the rnp->lock, as that
2093 	 * could cause possible deadlocks with the rq->lock. Defer
2094 	 * the wakeup to our caller.
2095 	 */
2096 	return true;
2097 }
2098 
2099 /*
2100  * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
2101  * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
2102  * is invoked indirectly from rcu_advance_cbs(), which would result in
2103  * endless recursion -- or would do so if it wasn't for the self-deadlock
2104  * that is encountered beforehand.
2105  *
2106  * Returns true if the grace-period kthread needs to be awakened.
2107  */
2108 static bool rcu_start_gp(struct rcu_state *rsp)
2109 {
2110 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
2111 	struct rcu_node *rnp = rcu_get_root(rsp);
2112 	bool ret = false;
2113 
2114 	/*
2115 	 * If there is no grace period in progress right now, any
2116 	 * callbacks we have up to this point will be satisfied by the
2117 	 * next grace period.  Also, advancing the callbacks reduces the
2118 	 * probability of false positives from cpu_needs_another_gp()
2119 	 * resulting in pointless grace periods.  So, advance callbacks
2120 	 * then start the grace period!
2121 	 */
2122 	ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
2123 	ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
2124 	return ret;
2125 }
2126 
2127 /*
2128  * Report a full set of quiescent states to the specified rcu_state
2129  * data structure.  This involves cleaning up after the prior grace
2130  * period and letting rcu_start_gp() start up the next grace period
2131  * if one is needed.  Note that the caller must hold rnp->lock, which
2132  * is released before return.
2133  */
2134 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2135 	__releases(rcu_get_root(rsp)->lock)
2136 {
2137 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2138 	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2139 	rcu_gp_kthread_wake(rsp);
2140 }
2141 
2142 /*
2143  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2144  * Allows quiescent states for a group of CPUs to be reported at one go
2145  * to the specified rcu_node structure, though all the CPUs in the group
2146  * must be represented by the same rcu_node structure (which need not be a
2147  * leaf rcu_node structure, though it often will be).  The gps parameter
2148  * is the grace-period snapshot, which means that the quiescent states
2149  * are valid only if rnp->gpnum is equal to gps.  That structure's lock
2150  * must be held upon entry, and it is released before return.
2151  */
2152 static void
2153 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
2154 		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
2155 	__releases(rnp->lock)
2156 {
2157 	unsigned long oldmask = 0;
2158 	struct rcu_node *rnp_c;
2159 
2160 	/* Walk up the rcu_node hierarchy. */
2161 	for (;;) {
2162 		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
2163 
2164 			/*
2165 			 * Our bit has already been cleared, or the
2166 			 * relevant grace period is already over, so done.
2167 			 */
2168 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2169 			return;
2170 		}
2171 		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2172 		rnp->qsmask &= ~mask;
2173 		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
2174 						 mask, rnp->qsmask, rnp->level,
2175 						 rnp->grplo, rnp->grphi,
2176 						 !!rnp->gp_tasks);
2177 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2178 
2179 			/* Other bits still set at this level, so done. */
2180 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2181 			return;
2182 		}
2183 		mask = rnp->grpmask;
2184 		if (rnp->parent == NULL) {
2185 
2186 			/* No more levels.  Exit loop holding root lock. */
2187 
2188 			break;
2189 		}
2190 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2191 		rnp_c = rnp;
2192 		rnp = rnp->parent;
2193 		raw_spin_lock_irqsave(&rnp->lock, flags);
2194 		smp_mb__after_unlock_lock();
2195 		oldmask = rnp_c->qsmask;
2196 	}
2197 
2198 	/*
2199 	 * Get here if we are the last CPU to pass through a quiescent
2200 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
2201 	 * to clean up and start the next grace period if one is needed.
2202 	 */
2203 	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
2204 }
2205 
2206 /*
2207  * Record a quiescent state for all tasks that were previously queued
2208  * on the specified rcu_node structure and that were blocking the current
2209  * RCU grace period.  The caller must hold the specified rnp->lock with
2210  * irqs disabled, and this lock is released upon return, but irqs remain
2211  * disabled.
2212  */
2213 static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2214 				      struct rcu_node *rnp, unsigned long flags)
2215 	__releases(rnp->lock)
2216 {
2217 	unsigned long gps;
2218 	unsigned long mask;
2219 	struct rcu_node *rnp_p;
2220 
2221 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2222 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2223 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2224 		return;  /* Still need more quiescent states! */
2225 	}
2226 
2227 	rnp_p = rnp->parent;
2228 	if (rnp_p == NULL) {
2229 		/*
2230 		 * Only one rcu_node structure in the tree, so don't
2231 		 * try to report up to its nonexistent parent!
2232 		 */
2233 		rcu_report_qs_rsp(rsp, flags);
2234 		return;
2235 	}
2236 
2237 	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
2238 	gps = rnp->gpnum;
2239 	mask = rnp->grpmask;
2240 	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
2241 	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
2242 	smp_mb__after_unlock_lock();
2243 	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
2244 }
2245 
2246 /*
2247  * Record a quiescent state for the specified CPU to that CPU's rcu_data
2248  * structure.  This must be either called from the specified CPU, or
2249  * called when the specified CPU is known to be offline (and when it is
2250  * also known that no other CPU is concurrently trying to help the offline
2251  * CPU).  The lastcomp argument is used to make sure we are still in the
2252  * grace period of interest.  We don't want to end the current grace period
2253  * based on quiescent states detected in an earlier grace period!
2254  */
2255 static void
2256 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2257 {
2258 	unsigned long flags;
2259 	unsigned long mask;
2260 	bool needwake;
2261 	struct rcu_node *rnp;
2262 
2263 	rnp = rdp->mynode;
2264 	raw_spin_lock_irqsave(&rnp->lock, flags);
2265 	smp_mb__after_unlock_lock();
2266 	if ((rdp->passed_quiesce == 0 &&
2267 	     rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2268 	    rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2269 	    rdp->gpwrap) {
2270 
2271 		/*
2272 		 * The grace period in which this quiescent state was
2273 		 * recorded has ended, so don't report it upwards.
2274 		 * We will instead need a new quiescent state that lies
2275 		 * within the current grace period.
2276 		 */
2277 		rdp->passed_quiesce = 0;	/* need qs for new gp. */
2278 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
2279 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2280 		return;
2281 	}
2282 	mask = rdp->grpmask;
2283 	if ((rnp->qsmask & mask) == 0) {
2284 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2285 	} else {
2286 		rdp->qs_pending = 0;
2287 
2288 		/*
2289 		 * This GP can't end until cpu checks in, so all of our
2290 		 * callbacks can be processed during the next GP.
2291 		 */
2292 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2293 
2294 		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2295 		/* ^^^ Released rnp->lock */
2296 		if (needwake)
2297 			rcu_gp_kthread_wake(rsp);
2298 	}
2299 }
2300 
2301 /*
2302  * Check to see if there is a new grace period of which this CPU
2303  * is not yet aware, and if so, set up local rcu_data state for it.
2304  * Otherwise, see if this CPU has just passed through its first
2305  * quiescent state for this grace period, and record that fact if so.
2306  */
2307 static void
2308 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2309 {
2310 	/* Check for grace-period ends and beginnings. */
2311 	note_gp_changes(rsp, rdp);
2312 
2313 	/*
2314 	 * Does this CPU still need to do its part for current grace period?
2315 	 * If no, return and let the other CPUs do their part as well.
2316 	 */
2317 	if (!rdp->qs_pending)
2318 		return;
2319 
2320 	/*
2321 	 * Was there a quiescent state since the beginning of the grace
2322 	 * period? If no, then exit and wait for the next call.
2323 	 */
2324 	if (!rdp->passed_quiesce &&
2325 	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
2326 		return;
2327 
2328 	/*
2329 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2330 	 * judge of that).
2331 	 */
2332 	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2333 }
2334 
2335 #ifdef CONFIG_HOTPLUG_CPU
2336 
2337 /*
2338  * Send the specified CPU's RCU callbacks to the orphanage.  The
2339  * specified CPU must be offline, and the caller must hold the
2340  * ->orphan_lock.
2341  */
2342 static void
2343 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2344 			  struct rcu_node *rnp, struct rcu_data *rdp)
2345 {
2346 	/* No-CBs CPUs do not have orphanable callbacks. */
2347 	if (rcu_is_nocb_cpu(rdp->cpu))
2348 		return;
2349 
2350 	/*
2351 	 * Orphan the callbacks.  First adjust the counts.  This is safe
2352 	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
2353 	 * cannot be running now.  Thus no memory barrier is required.
2354 	 */
2355 	if (rdp->nxtlist != NULL) {
2356 		rsp->qlen_lazy += rdp->qlen_lazy;
2357 		rsp->qlen += rdp->qlen;
2358 		rdp->n_cbs_orphaned += rdp->qlen;
2359 		rdp->qlen_lazy = 0;
2360 		ACCESS_ONCE(rdp->qlen) = 0;
2361 	}
2362 
2363 	/*
2364 	 * Next, move those callbacks still needing a grace period to
2365 	 * the orphanage, where some other CPU will pick them up.
2366 	 * Some of the callbacks might have gone partway through a grace
2367 	 * period, but that is too bad.  They get to start over because we
2368 	 * cannot assume that grace periods are synchronized across CPUs.
2369 	 * We don't bother updating the ->nxttail[] array yet, instead
2370 	 * we just reset the whole thing later on.
2371 	 */
2372 	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
2373 		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
2374 		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
2375 		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2376 	}
2377 
2378 	/*
2379 	 * Then move the ready-to-invoke callbacks to the orphanage,
2380 	 * where some other CPU will pick them up.  These will not be
2381 	 * required to pass though another grace period: They are done.
2382 	 */
2383 	if (rdp->nxtlist != NULL) {
2384 		*rsp->orphan_donetail = rdp->nxtlist;
2385 		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
2386 	}
2387 
2388 	/*
2389 	 * Finally, initialize the rcu_data structure's list to empty and
2390 	 * disallow further callbacks on this CPU.
2391 	 */
2392 	init_callback_list(rdp);
2393 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2394 }
2395 
2396 /*
2397  * Adopt the RCU callbacks from the specified rcu_state structure's
2398  * orphanage.  The caller must hold the ->orphan_lock.
2399  */
2400 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
2401 {
2402 	int i;
2403 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2404 
2405 	/* No-CBs CPUs are handled specially. */
2406 	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
2407 		return;
2408 
2409 	/* Do the accounting first. */
2410 	rdp->qlen_lazy += rsp->qlen_lazy;
2411 	rdp->qlen += rsp->qlen;
2412 	rdp->n_cbs_adopted += rsp->qlen;
2413 	if (rsp->qlen_lazy != rsp->qlen)
2414 		rcu_idle_count_callbacks_posted();
2415 	rsp->qlen_lazy = 0;
2416 	rsp->qlen = 0;
2417 
2418 	/*
2419 	 * We do not need a memory barrier here because the only way we
2420 	 * can get here if there is an rcu_barrier() in flight is if
2421 	 * we are the task doing the rcu_barrier().
2422 	 */
2423 
2424 	/* First adopt the ready-to-invoke callbacks. */
2425 	if (rsp->orphan_donelist != NULL) {
2426 		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
2427 		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
2428 		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
2429 			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2430 				rdp->nxttail[i] = rsp->orphan_donetail;
2431 		rsp->orphan_donelist = NULL;
2432 		rsp->orphan_donetail = &rsp->orphan_donelist;
2433 	}
2434 
2435 	/* And then adopt the callbacks that still need a grace period. */
2436 	if (rsp->orphan_nxtlist != NULL) {
2437 		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
2438 		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
2439 		rsp->orphan_nxtlist = NULL;
2440 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2441 	}
2442 }
2443 
2444 /*
2445  * Trace the fact that this CPU is going offline.
2446  */
2447 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2448 {
2449 	RCU_TRACE(unsigned long mask);
2450 	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2451 	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2452 
2453 	RCU_TRACE(mask = rdp->grpmask);
2454 	trace_rcu_grace_period(rsp->name,
2455 			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2456 			       TPS("cpuofl"));
2457 }
2458 
2459 /*
2460  * All CPUs for the specified rcu_node structure have gone offline,
2461  * and all tasks that were preempted within an RCU read-side critical
2462  * section while running on one of those CPUs have since exited their RCU
2463  * read-side critical section.  Some other CPU is reporting this fact with
2464  * the specified rcu_node structure's ->lock held and interrupts disabled.
2465  * This function therefore goes up the tree of rcu_node structures,
2466  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2467  * the leaf rcu_node structure's ->qsmaskinit field has already been
2468  * updated
2469  *
2470  * This function does check that the specified rcu_node structure has
2471  * all CPUs offline and no blocked tasks, so it is OK to invoke it
2472  * prematurely.  That said, invoking it after the fact will cost you
2473  * a needless lock acquisition.  So once it has done its work, don't
2474  * invoke it again.
2475  */
2476 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2477 {
2478 	long mask;
2479 	struct rcu_node *rnp = rnp_leaf;
2480 
2481 	if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2482 		return;
2483 	for (;;) {
2484 		mask = rnp->grpmask;
2485 		rnp = rnp->parent;
2486 		if (!rnp)
2487 			break;
2488 		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2489 		smp_mb__after_unlock_lock(); /* GP memory ordering. */
2490 		rnp->qsmaskinit &= ~mask;
2491 		rnp->qsmask &= ~mask;
2492 		if (rnp->qsmaskinit) {
2493 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2494 			return;
2495 		}
2496 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2497 	}
2498 }
2499 
2500 /*
2501  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
2502  * function.  We now remove it from the rcu_node tree's ->qsmaskinit
2503  * bit masks.
2504  */
2505 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
2506 {
2507 	unsigned long flags;
2508 	unsigned long mask;
2509 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2510 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2511 
2512 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
2513 	mask = rdp->grpmask;
2514 	raw_spin_lock_irqsave(&rnp->lock, flags);
2515 	smp_mb__after_unlock_lock();	/* Enforce GP memory-order guarantee. */
2516 	rnp->qsmaskinitnext &= ~mask;
2517 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
2518 }
2519 
2520 /*
2521  * The CPU has been completely removed, and some other CPU is reporting
2522  * this fact from process context.  Do the remainder of the cleanup,
2523  * including orphaning the outgoing CPU's RCU callbacks, and also
2524  * adopting them.  There can only be one CPU hotplug operation at a time,
2525  * so no other CPU can be attempting to update rcu_cpu_kthread_task.
2526  */
2527 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2528 {
2529 	unsigned long flags;
2530 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2531 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2532 
2533 	/* Adjust any no-longer-needed kthreads. */
2534 	rcu_boost_kthread_setaffinity(rnp, -1);
2535 
2536 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2537 	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2538 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2539 	rcu_adopt_orphan_cbs(rsp, flags);
2540 	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
2541 
2542 	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2543 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2544 		  cpu, rdp->qlen, rdp->nxtlist);
2545 }
2546 
2547 #else /* #ifdef CONFIG_HOTPLUG_CPU */
2548 
2549 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2550 {
2551 }
2552 
2553 static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2554 {
2555 }
2556 
2557 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
2558 {
2559 }
2560 
2561 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2562 {
2563 }
2564 
2565 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
2566 
2567 /*
2568  * Invoke any RCU callbacks that have made it to the end of their grace
2569  * period.  Thottle as specified by rdp->blimit.
2570  */
2571 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2572 {
2573 	unsigned long flags;
2574 	struct rcu_head *next, *list, **tail;
2575 	long bl, count, count_lazy;
2576 	int i;
2577 
2578 	/* If no callbacks are ready, just return. */
2579 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
2580 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
2581 		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
2582 				    need_resched(), is_idle_task(current),
2583 				    rcu_is_callbacks_kthread());
2584 		return;
2585 	}
2586 
2587 	/*
2588 	 * Extract the list of ready callbacks, disabling to prevent
2589 	 * races with call_rcu() from interrupt handlers.
2590 	 */
2591 	local_irq_save(flags);
2592 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2593 	bl = rdp->blimit;
2594 	trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
2595 	list = rdp->nxtlist;
2596 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2597 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2598 	tail = rdp->nxttail[RCU_DONE_TAIL];
2599 	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2600 		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2601 			rdp->nxttail[i] = &rdp->nxtlist;
2602 	local_irq_restore(flags);
2603 
2604 	/* Invoke callbacks. */
2605 	count = count_lazy = 0;
2606 	while (list) {
2607 		next = list->next;
2608 		prefetch(next);
2609 		debug_rcu_head_unqueue(list);
2610 		if (__rcu_reclaim(rsp->name, list))
2611 			count_lazy++;
2612 		list = next;
2613 		/* Stop only if limit reached and CPU has something to do. */
2614 		if (++count >= bl &&
2615 		    (need_resched() ||
2616 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2617 			break;
2618 	}
2619 
2620 	local_irq_save(flags);
2621 	trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2622 			    is_idle_task(current),
2623 			    rcu_is_callbacks_kthread());
2624 
2625 	/* Update count, and requeue any remaining callbacks. */
2626 	if (list != NULL) {
2627 		*tail = rdp->nxtlist;
2628 		rdp->nxtlist = list;
2629 		for (i = 0; i < RCU_NEXT_SIZE; i++)
2630 			if (&rdp->nxtlist == rdp->nxttail[i])
2631 				rdp->nxttail[i] = tail;
2632 			else
2633 				break;
2634 	}
2635 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2636 	rdp->qlen_lazy -= count_lazy;
2637 	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
2638 	rdp->n_cbs_invoked += count;
2639 
2640 	/* Reinstate batch limit if we have worked down the excess. */
2641 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2642 		rdp->blimit = blimit;
2643 
2644 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2645 	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2646 		rdp->qlen_last_fqs_check = 0;
2647 		rdp->n_force_qs_snap = rsp->n_force_qs;
2648 	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2649 		rdp->qlen_last_fqs_check = rdp->qlen;
2650 	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
2651 
2652 	local_irq_restore(flags);
2653 
2654 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2655 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2656 		invoke_rcu_core();
2657 }
2658 
2659 /*
2660  * Check to see if this CPU is in a non-context-switch quiescent state
2661  * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2662  * Also schedule RCU core processing.
2663  *
2664  * This function must be called from hardirq context.  It is normally
2665  * invoked from the scheduling-clock interrupt.  If rcu_pending returns
2666  * false, there is no point in invoking rcu_check_callbacks().
2667  */
2668 void rcu_check_callbacks(int user)
2669 {
2670 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2671 	increment_cpu_stall_ticks();
2672 	if (user || rcu_is_cpu_rrupt_from_idle()) {
2673 
2674 		/*
2675 		 * Get here if this CPU took its interrupt from user
2676 		 * mode or from the idle loop, and if this is not a
2677 		 * nested interrupt.  In this case, the CPU is in
2678 		 * a quiescent state, so note it.
2679 		 *
2680 		 * No memory barrier is required here because both
2681 		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2682 		 * variables that other CPUs neither access nor modify,
2683 		 * at least not while the corresponding CPU is online.
2684 		 */
2685 
2686 		rcu_sched_qs();
2687 		rcu_bh_qs();
2688 
2689 	} else if (!in_softirq()) {
2690 
2691 		/*
2692 		 * Get here if this CPU did not take its interrupt from
2693 		 * softirq, in other words, if it is not interrupting
2694 		 * a rcu_bh read-side critical section.  This is an _bh
2695 		 * critical section, so note it.
2696 		 */
2697 
2698 		rcu_bh_qs();
2699 	}
2700 	rcu_preempt_check_callbacks();
2701 	if (rcu_pending())
2702 		invoke_rcu_core();
2703 	if (user)
2704 		rcu_note_voluntary_context_switch(current);
2705 	trace_rcu_utilization(TPS("End scheduler-tick"));
2706 }
2707 
2708 /*
2709  * Scan the leaf rcu_node structures, processing dyntick state for any that
2710  * have not yet encountered a quiescent state, using the function specified.
2711  * Also initiate boosting for any threads blocked on the root rcu_node.
2712  *
2713  * The caller must have suppressed start of new grace periods.
2714  */
2715 static void force_qs_rnp(struct rcu_state *rsp,
2716 			 int (*f)(struct rcu_data *rsp, bool *isidle,
2717 				  unsigned long *maxj),
2718 			 bool *isidle, unsigned long *maxj)
2719 {
2720 	unsigned long bit;
2721 	int cpu;
2722 	unsigned long flags;
2723 	unsigned long mask;
2724 	struct rcu_node *rnp;
2725 
2726 	rcu_for_each_leaf_node(rsp, rnp) {
2727 		cond_resched_rcu_qs();
2728 		mask = 0;
2729 		raw_spin_lock_irqsave(&rnp->lock, flags);
2730 		smp_mb__after_unlock_lock();
2731 		if (!rcu_gp_in_progress(rsp)) {
2732 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2733 			return;
2734 		}
2735 		if (rnp->qsmask == 0) {
2736 			if (rcu_state_p == &rcu_sched_state ||
2737 			    rsp != rcu_state_p ||
2738 			    rcu_preempt_blocked_readers_cgp(rnp)) {
2739 				/*
2740 				 * No point in scanning bits because they
2741 				 * are all zero.  But we might need to
2742 				 * priority-boost blocked readers.
2743 				 */
2744 				rcu_initiate_boost(rnp, flags);
2745 				/* rcu_initiate_boost() releases rnp->lock */
2746 				continue;
2747 			}
2748 			if (rnp->parent &&
2749 			    (rnp->parent->qsmask & rnp->grpmask)) {
2750 				/*
2751 				 * Race between grace-period
2752 				 * initialization and task exiting RCU
2753 				 * read-side critical section: Report.
2754 				 */
2755 				rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2756 				/* rcu_report_unblock_qs_rnp() rlses ->lock */
2757 				continue;
2758 			}
2759 		}
2760 		cpu = rnp->grplo;
2761 		bit = 1;
2762 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2763 			if ((rnp->qsmask & bit) != 0) {
2764 				if ((rnp->qsmaskinit & bit) == 0)
2765 					*isidle = false; /* Pending hotplug. */
2766 				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2767 					mask |= bit;
2768 			}
2769 		}
2770 		if (mask != 0) {
2771 			/* Idle/offline CPUs, report (releases rnp->lock. */
2772 			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2773 		} else {
2774 			/* Nothing to do here, so just drop the lock. */
2775 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2776 		}
2777 	}
2778 }
2779 
2780 /*
2781  * Force quiescent states on reluctant CPUs, and also detect which
2782  * CPUs are in dyntick-idle mode.
2783  */
2784 static void force_quiescent_state(struct rcu_state *rsp)
2785 {
2786 	unsigned long flags;
2787 	bool ret;
2788 	struct rcu_node *rnp;
2789 	struct rcu_node *rnp_old = NULL;
2790 
2791 	/* Funnel through hierarchy to reduce memory contention. */
2792 	rnp = __this_cpu_read(rsp->rda->mynode);
2793 	for (; rnp != NULL; rnp = rnp->parent) {
2794 		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2795 		      !raw_spin_trylock(&rnp->fqslock);
2796 		if (rnp_old != NULL)
2797 			raw_spin_unlock(&rnp_old->fqslock);
2798 		if (ret) {
2799 			rsp->n_force_qs_lh++;
2800 			return;
2801 		}
2802 		rnp_old = rnp;
2803 	}
2804 	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2805 
2806 	/* Reached the root of the rcu_node tree, acquire lock. */
2807 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
2808 	smp_mb__after_unlock_lock();
2809 	raw_spin_unlock(&rnp_old->fqslock);
2810 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2811 		rsp->n_force_qs_lh++;
2812 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2813 		return;  /* Someone beat us to it. */
2814 	}
2815 	ACCESS_ONCE(rsp->gp_flags) =
2816 		ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
2817 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2818 	rcu_gp_kthread_wake(rsp);
2819 }
2820 
2821 /*
2822  * This does the RCU core processing work for the specified rcu_state
2823  * and rcu_data structures.  This may be called only from the CPU to
2824  * whom the rdp belongs.
2825  */
2826 static void
2827 __rcu_process_callbacks(struct rcu_state *rsp)
2828 {
2829 	unsigned long flags;
2830 	bool needwake;
2831 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2832 
2833 	WARN_ON_ONCE(rdp->beenonline == 0);
2834 
2835 	/* Update RCU state based on any recent quiescent states. */
2836 	rcu_check_quiescent_state(rsp, rdp);
2837 
2838 	/* Does this CPU require a not-yet-started grace period? */
2839 	local_irq_save(flags);
2840 	if (cpu_needs_another_gp(rsp, rdp)) {
2841 		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2842 		needwake = rcu_start_gp(rsp);
2843 		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2844 		if (needwake)
2845 			rcu_gp_kthread_wake(rsp);
2846 	} else {
2847 		local_irq_restore(flags);
2848 	}
2849 
2850 	/* If there are callbacks ready, invoke them. */
2851 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2852 		invoke_rcu_callbacks(rsp, rdp);
2853 
2854 	/* Do any needed deferred wakeups of rcuo kthreads. */
2855 	do_nocb_deferred_wakeup(rdp);
2856 }
2857 
2858 /*
2859  * Do RCU core processing for the current CPU.
2860  */
2861 static void rcu_process_callbacks(struct softirq_action *unused)
2862 {
2863 	struct rcu_state *rsp;
2864 
2865 	if (cpu_is_offline(smp_processor_id()))
2866 		return;
2867 	trace_rcu_utilization(TPS("Start RCU core"));
2868 	for_each_rcu_flavor(rsp)
2869 		__rcu_process_callbacks(rsp);
2870 	trace_rcu_utilization(TPS("End RCU core"));
2871 }
2872 
2873 /*
2874  * Schedule RCU callback invocation.  If the specified type of RCU
2875  * does not support RCU priority boosting, just do a direct call,
2876  * otherwise wake up the per-CPU kernel kthread.  Note that because we
2877  * are running on the current CPU with softirqs disabled, the
2878  * rcu_cpu_kthread_task cannot disappear out from under us.
2879  */
2880 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2881 {
2882 	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
2883 		return;
2884 	if (likely(!rsp->boost)) {
2885 		rcu_do_batch(rsp, rdp);
2886 		return;
2887 	}
2888 	invoke_rcu_callbacks_kthread();
2889 }
2890 
2891 static void invoke_rcu_core(void)
2892 {
2893 	if (cpu_online(smp_processor_id()))
2894 		raise_softirq(RCU_SOFTIRQ);
2895 }
2896 
2897 /*
2898  * Handle any core-RCU processing required by a call_rcu() invocation.
2899  */
2900 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2901 			    struct rcu_head *head, unsigned long flags)
2902 {
2903 	bool needwake;
2904 
2905 	/*
2906 	 * If called from an extended quiescent state, invoke the RCU
2907 	 * core in order to force a re-evaluation of RCU's idleness.
2908 	 */
2909 	if (!rcu_is_watching())
2910 		invoke_rcu_core();
2911 
2912 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2913 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2914 		return;
2915 
2916 	/*
2917 	 * Force the grace period if too many callbacks or too long waiting.
2918 	 * Enforce hysteresis, and don't invoke force_quiescent_state()
2919 	 * if some other CPU has recently done so.  Also, don't bother
2920 	 * invoking force_quiescent_state() if the newly enqueued callback
2921 	 * is the only one waiting for a grace period to complete.
2922 	 */
2923 	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
2924 
2925 		/* Are we ignoring a completed grace period? */
2926 		note_gp_changes(rsp, rdp);
2927 
2928 		/* Start a new grace period if one not already started. */
2929 		if (!rcu_gp_in_progress(rsp)) {
2930 			struct rcu_node *rnp_root = rcu_get_root(rsp);
2931 
2932 			raw_spin_lock(&rnp_root->lock);
2933 			smp_mb__after_unlock_lock();
2934 			needwake = rcu_start_gp(rsp);
2935 			raw_spin_unlock(&rnp_root->lock);
2936 			if (needwake)
2937 				rcu_gp_kthread_wake(rsp);
2938 		} else {
2939 			/* Give the grace period a kick. */
2940 			rdp->blimit = LONG_MAX;
2941 			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2942 			    *rdp->nxttail[RCU_DONE_TAIL] != head)
2943 				force_quiescent_state(rsp);
2944 			rdp->n_force_qs_snap = rsp->n_force_qs;
2945 			rdp->qlen_last_fqs_check = rdp->qlen;
2946 		}
2947 	}
2948 }
2949 
2950 /*
2951  * RCU callback function to leak a callback.
2952  */
2953 static void rcu_leak_callback(struct rcu_head *rhp)
2954 {
2955 }
2956 
2957 /*
2958  * Helper function for call_rcu() and friends.  The cpu argument will
2959  * normally be -1, indicating "currently running CPU".  It may specify
2960  * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
2961  * is expected to specify a CPU.
2962  */
2963 static void
2964 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
2965 	   struct rcu_state *rsp, int cpu, bool lazy)
2966 {
2967 	unsigned long flags;
2968 	struct rcu_data *rdp;
2969 
2970 	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
2971 	if (debug_rcu_head_queue(head)) {
2972 		/* Probable double call_rcu(), so leak the callback. */
2973 		ACCESS_ONCE(head->func) = rcu_leak_callback;
2974 		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
2975 		return;
2976 	}
2977 	head->func = func;
2978 	head->next = NULL;
2979 
2980 	/*
2981 	 * Opportunistically note grace-period endings and beginnings.
2982 	 * Note that we might see a beginning right after we see an
2983 	 * end, but never vice versa, since this CPU has to pass through
2984 	 * a quiescent state betweentimes.
2985 	 */
2986 	local_irq_save(flags);
2987 	rdp = this_cpu_ptr(rsp->rda);
2988 
2989 	/* Add the callback to our list. */
2990 	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
2991 		int offline;
2992 
2993 		if (cpu != -1)
2994 			rdp = per_cpu_ptr(rsp->rda, cpu);
2995 		if (likely(rdp->mynode)) {
2996 			/* Post-boot, so this should be for a no-CBs CPU. */
2997 			offline = !__call_rcu_nocb(rdp, head, lazy, flags);
2998 			WARN_ON_ONCE(offline);
2999 			/* Offline CPU, _call_rcu() illegal, leak callback.  */
3000 			local_irq_restore(flags);
3001 			return;
3002 		}
3003 		/*
3004 		 * Very early boot, before rcu_init().  Initialize if needed
3005 		 * and then drop through to queue the callback.
3006 		 */
3007 		BUG_ON(cpu != -1);
3008 		WARN_ON_ONCE(!rcu_is_watching());
3009 		if (!likely(rdp->nxtlist))
3010 			init_default_callback_list(rdp);
3011 	}
3012 	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
3013 	if (lazy)
3014 		rdp->qlen_lazy++;
3015 	else
3016 		rcu_idle_count_callbacks_posted();
3017 	smp_mb();  /* Count before adding callback for rcu_barrier(). */
3018 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
3019 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
3020 
3021 	if (__is_kfree_rcu_offset((unsigned long)func))
3022 		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
3023 					 rdp->qlen_lazy, rdp->qlen);
3024 	else
3025 		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
3026 
3027 	/* Go handle any RCU core processing required. */
3028 	__call_rcu_core(rsp, rdp, head, flags);
3029 	local_irq_restore(flags);
3030 }
3031 
3032 /*
3033  * Queue an RCU-sched callback for invocation after a grace period.
3034  */
3035 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
3036 {
3037 	__call_rcu(head, func, &rcu_sched_state, -1, 0);
3038 }
3039 EXPORT_SYMBOL_GPL(call_rcu_sched);
3040 
3041 /*
3042  * Queue an RCU callback for invocation after a quicker grace period.
3043  */
3044 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
3045 {
3046 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
3047 }
3048 EXPORT_SYMBOL_GPL(call_rcu_bh);
3049 
3050 /*
3051  * Queue an RCU callback for lazy invocation after a grace period.
3052  * This will likely be later named something like "call_rcu_lazy()",
3053  * but this change will require some way of tagging the lazy RCU
3054  * callbacks in the list of pending callbacks. Until then, this
3055  * function may only be called from __kfree_rcu().
3056  */
3057 void kfree_call_rcu(struct rcu_head *head,
3058 		    void (*func)(struct rcu_head *rcu))
3059 {
3060 	__call_rcu(head, func, rcu_state_p, -1, 1);
3061 }
3062 EXPORT_SYMBOL_GPL(kfree_call_rcu);
3063 
3064 /*
3065  * Because a context switch is a grace period for RCU-sched and RCU-bh,
3066  * any blocking grace-period wait automatically implies a grace period
3067  * if there is only one CPU online at any point time during execution
3068  * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
3069  * occasionally incorrectly indicate that there are multiple CPUs online
3070  * when there was in fact only one the whole time, as this just adds
3071  * some overhead: RCU still operates correctly.
3072  */
3073 static inline int rcu_blocking_is_gp(void)
3074 {
3075 	int ret;
3076 
3077 	might_sleep();  /* Check for RCU read-side critical section. */
3078 	preempt_disable();
3079 	ret = num_online_cpus() <= 1;
3080 	preempt_enable();
3081 	return ret;
3082 }
3083 
3084 /**
3085  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
3086  *
3087  * Control will return to the caller some time after a full rcu-sched
3088  * grace period has elapsed, in other words after all currently executing
3089  * rcu-sched read-side critical sections have completed.   These read-side
3090  * critical sections are delimited by rcu_read_lock_sched() and
3091  * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
3092  * local_irq_disable(), and so on may be used in place of
3093  * rcu_read_lock_sched().
3094  *
3095  * This means that all preempt_disable code sequences, including NMI and
3096  * non-threaded hardware-interrupt handlers, in progress on entry will
3097  * have completed before this primitive returns.  However, this does not
3098  * guarantee that softirq handlers will have completed, since in some
3099  * kernels, these handlers can run in process context, and can block.
3100  *
3101  * Note that this guarantee implies further memory-ordering guarantees.
3102  * On systems with more than one CPU, when synchronize_sched() returns,
3103  * each CPU is guaranteed to have executed a full memory barrier since the
3104  * end of its last RCU-sched read-side critical section whose beginning
3105  * preceded the call to synchronize_sched().  In addition, each CPU having
3106  * an RCU read-side critical section that extends beyond the return from
3107  * synchronize_sched() is guaranteed to have executed a full memory barrier
3108  * after the beginning of synchronize_sched() and before the beginning of
3109  * that RCU read-side critical section.  Note that these guarantees include
3110  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3111  * that are executing in the kernel.
3112  *
3113  * Furthermore, if CPU A invoked synchronize_sched(), which returned
3114  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3115  * to have executed a full memory barrier during the execution of
3116  * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
3117  * again only if the system has more than one CPU).
3118  *
3119  * This primitive provides the guarantees made by the (now removed)
3120  * synchronize_kernel() API.  In contrast, synchronize_rcu() only
3121  * guarantees that rcu_read_lock() sections will have completed.
3122  * In "classic RCU", these two guarantees happen to be one and
3123  * the same, but can differ in realtime RCU implementations.
3124  */
3125 void synchronize_sched(void)
3126 {
3127 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
3128 			   !lock_is_held(&rcu_lock_map) &&
3129 			   !lock_is_held(&rcu_sched_lock_map),
3130 			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
3131 	if (rcu_blocking_is_gp())
3132 		return;
3133 	if (rcu_gp_is_expedited())
3134 		synchronize_sched_expedited();
3135 	else
3136 		wait_rcu_gp(call_rcu_sched);
3137 }
3138 EXPORT_SYMBOL_GPL(synchronize_sched);
3139 
3140 /**
3141  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
3142  *
3143  * Control will return to the caller some time after a full rcu_bh grace
3144  * period has elapsed, in other words after all currently executing rcu_bh
3145  * read-side critical sections have completed.  RCU read-side critical
3146  * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
3147  * and may be nested.
3148  *
3149  * See the description of synchronize_sched() for more detailed information
3150  * on memory ordering guarantees.
3151  */
3152 void synchronize_rcu_bh(void)
3153 {
3154 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
3155 			   !lock_is_held(&rcu_lock_map) &&
3156 			   !lock_is_held(&rcu_sched_lock_map),
3157 			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
3158 	if (rcu_blocking_is_gp())
3159 		return;
3160 	if (rcu_gp_is_expedited())
3161 		synchronize_rcu_bh_expedited();
3162 	else
3163 		wait_rcu_gp(call_rcu_bh);
3164 }
3165 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
3166 
3167 /**
3168  * get_state_synchronize_rcu - Snapshot current RCU state
3169  *
3170  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3171  * to determine whether or not a full grace period has elapsed in the
3172  * meantime.
3173  */
3174 unsigned long get_state_synchronize_rcu(void)
3175 {
3176 	/*
3177 	 * Any prior manipulation of RCU-protected data must happen
3178 	 * before the load from ->gpnum.
3179 	 */
3180 	smp_mb();  /* ^^^ */
3181 
3182 	/*
3183 	 * Make sure this load happens before the purportedly
3184 	 * time-consuming work between get_state_synchronize_rcu()
3185 	 * and cond_synchronize_rcu().
3186 	 */
3187 	return smp_load_acquire(&rcu_state_p->gpnum);
3188 }
3189 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3190 
3191 /**
3192  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3193  *
3194  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3195  *
3196  * If a full RCU grace period has elapsed since the earlier call to
3197  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
3198  * synchronize_rcu() to wait for a full grace period.
3199  *
3200  * Yes, this function does not take counter wrap into account.  But
3201  * counter wrap is harmless.  If the counter wraps, we have waited for
3202  * more than 2 billion grace periods (and way more on a 64-bit system!),
3203  * so waiting for one additional grace period should be just fine.
3204  */
3205 void cond_synchronize_rcu(unsigned long oldstate)
3206 {
3207 	unsigned long newstate;
3208 
3209 	/*
3210 	 * Ensure that this load happens before any RCU-destructive
3211 	 * actions the caller might carry out after we return.
3212 	 */
3213 	newstate = smp_load_acquire(&rcu_state_p->completed);
3214 	if (ULONG_CMP_GE(oldstate, newstate))
3215 		synchronize_rcu();
3216 }
3217 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3218 
3219 static int synchronize_sched_expedited_cpu_stop(void *data)
3220 {
3221 	/*
3222 	 * There must be a full memory barrier on each affected CPU
3223 	 * between the time that try_stop_cpus() is called and the
3224 	 * time that it returns.
3225 	 *
3226 	 * In the current initial implementation of cpu_stop, the
3227 	 * above condition is already met when the control reaches
3228 	 * this point and the following smp_mb() is not strictly
3229 	 * necessary.  Do smp_mb() anyway for documentation and
3230 	 * robustness against future implementation changes.
3231 	 */
3232 	smp_mb(); /* See above comment block. */
3233 	return 0;
3234 }
3235 
3236 /**
3237  * synchronize_sched_expedited - Brute-force RCU-sched grace period
3238  *
3239  * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
3240  * approach to force the grace period to end quickly.  This consumes
3241  * significant time on all CPUs and is unfriendly to real-time workloads,
3242  * so is thus not recommended for any sort of common-case code.  In fact,
3243  * if you are using synchronize_sched_expedited() in a loop, please
3244  * restructure your code to batch your updates, and then use a single
3245  * synchronize_sched() instead.
3246  *
3247  * This implementation can be thought of as an application of ticket
3248  * locking to RCU, with sync_sched_expedited_started and
3249  * sync_sched_expedited_done taking on the roles of the halves
3250  * of the ticket-lock word.  Each task atomically increments
3251  * sync_sched_expedited_started upon entry, snapshotting the old value,
3252  * then attempts to stop all the CPUs.  If this succeeds, then each
3253  * CPU will have executed a context switch, resulting in an RCU-sched
3254  * grace period.  We are then done, so we use atomic_cmpxchg() to
3255  * update sync_sched_expedited_done to match our snapshot -- but
3256  * only if someone else has not already advanced past our snapshot.
3257  *
3258  * On the other hand, if try_stop_cpus() fails, we check the value
3259  * of sync_sched_expedited_done.  If it has advanced past our
3260  * initial snapshot, then someone else must have forced a grace period
3261  * some time after we took our snapshot.  In this case, our work is
3262  * done for us, and we can simply return.  Otherwise, we try again,
3263  * but keep our initial snapshot for purposes of checking for someone
3264  * doing our work for us.
3265  *
3266  * If we fail too many times in a row, we fall back to synchronize_sched().
3267  */
3268 void synchronize_sched_expedited(void)
3269 {
3270 	cpumask_var_t cm;
3271 	bool cma = false;
3272 	int cpu;
3273 	long firstsnap, s, snap;
3274 	int trycount = 0;
3275 	struct rcu_state *rsp = &rcu_sched_state;
3276 
3277 	/*
3278 	 * If we are in danger of counter wrap, just do synchronize_sched().
3279 	 * By allowing sync_sched_expedited_started to advance no more than
3280 	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
3281 	 * that more than 3.5 billion CPUs would be required to force a
3282 	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
3283 	 * course be required on a 64-bit system.
3284 	 */
3285 	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
3286 			 (ulong)atomic_long_read(&rsp->expedited_done) +
3287 			 ULONG_MAX / 8)) {
3288 		synchronize_sched();
3289 		atomic_long_inc(&rsp->expedited_wrap);
3290 		return;
3291 	}
3292 
3293 	/*
3294 	 * Take a ticket.  Note that atomic_inc_return() implies a
3295 	 * full memory barrier.
3296 	 */
3297 	snap = atomic_long_inc_return(&rsp->expedited_start);
3298 	firstsnap = snap;
3299 	if (!try_get_online_cpus()) {
3300 		/* CPU hotplug operation in flight, fall back to normal GP. */
3301 		wait_rcu_gp(call_rcu_sched);
3302 		atomic_long_inc(&rsp->expedited_normal);
3303 		return;
3304 	}
3305 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
3306 
3307 	/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
3308 	cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
3309 	if (cma) {
3310 		cpumask_copy(cm, cpu_online_mask);
3311 		cpumask_clear_cpu(raw_smp_processor_id(), cm);
3312 		for_each_cpu(cpu, cm) {
3313 			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
3314 
3315 			if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
3316 				cpumask_clear_cpu(cpu, cm);
3317 		}
3318 		if (cpumask_weight(cm) == 0)
3319 			goto all_cpus_idle;
3320 	}
3321 
3322 	/*
3323 	 * Each pass through the following loop attempts to force a
3324 	 * context switch on each CPU.
3325 	 */
3326 	while (try_stop_cpus(cma ? cm : cpu_online_mask,
3327 			     synchronize_sched_expedited_cpu_stop,
3328 			     NULL) == -EAGAIN) {
3329 		put_online_cpus();
3330 		atomic_long_inc(&rsp->expedited_tryfail);
3331 
3332 		/* Check to see if someone else did our work for us. */
3333 		s = atomic_long_read(&rsp->expedited_done);
3334 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
3335 			/* ensure test happens before caller kfree */
3336 			smp_mb__before_atomic(); /* ^^^ */
3337 			atomic_long_inc(&rsp->expedited_workdone1);
3338 			free_cpumask_var(cm);
3339 			return;
3340 		}
3341 
3342 		/* No joy, try again later.  Or just synchronize_sched(). */
3343 		if (trycount++ < 10) {
3344 			udelay(trycount * num_online_cpus());
3345 		} else {
3346 			wait_rcu_gp(call_rcu_sched);
3347 			atomic_long_inc(&rsp->expedited_normal);
3348 			free_cpumask_var(cm);
3349 			return;
3350 		}
3351 
3352 		/* Recheck to see if someone else did our work for us. */
3353 		s = atomic_long_read(&rsp->expedited_done);
3354 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
3355 			/* ensure test happens before caller kfree */
3356 			smp_mb__before_atomic(); /* ^^^ */
3357 			atomic_long_inc(&rsp->expedited_workdone2);
3358 			free_cpumask_var(cm);
3359 			return;
3360 		}
3361 
3362 		/*
3363 		 * Refetching sync_sched_expedited_started allows later
3364 		 * callers to piggyback on our grace period.  We retry
3365 		 * after they started, so our grace period works for them,
3366 		 * and they started after our first try, so their grace
3367 		 * period works for us.
3368 		 */
3369 		if (!try_get_online_cpus()) {
3370 			/* CPU hotplug operation in flight, use normal GP. */
3371 			wait_rcu_gp(call_rcu_sched);
3372 			atomic_long_inc(&rsp->expedited_normal);
3373 			free_cpumask_var(cm);
3374 			return;
3375 		}
3376 		snap = atomic_long_read(&rsp->expedited_start);
3377 		smp_mb(); /* ensure read is before try_stop_cpus(). */
3378 	}
3379 	atomic_long_inc(&rsp->expedited_stoppedcpus);
3380 
3381 all_cpus_idle:
3382 	free_cpumask_var(cm);
3383 
3384 	/*
3385 	 * Everyone up to our most recent fetch is covered by our grace
3386 	 * period.  Update the counter, but only if our work is still
3387 	 * relevant -- which it won't be if someone who started later
3388 	 * than we did already did their update.
3389 	 */
3390 	do {
3391 		atomic_long_inc(&rsp->expedited_done_tries);
3392 		s = atomic_long_read(&rsp->expedited_done);
3393 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
3394 			/* ensure test happens before caller kfree */
3395 			smp_mb__before_atomic(); /* ^^^ */
3396 			atomic_long_inc(&rsp->expedited_done_lost);
3397 			break;
3398 		}
3399 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
3400 	atomic_long_inc(&rsp->expedited_done_exit);
3401 
3402 	put_online_cpus();
3403 }
3404 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
3405 
3406 /*
3407  * Check to see if there is any immediate RCU-related work to be done
3408  * by the current CPU, for the specified type of RCU, returning 1 if so.
3409  * The checks are in order of increasing expense: checks that can be
3410  * carried out against CPU-local state are performed first.  However,
3411  * we must check for CPU stalls first, else we might not get a chance.
3412  */
3413 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3414 {
3415 	struct rcu_node *rnp = rdp->mynode;
3416 
3417 	rdp->n_rcu_pending++;
3418 
3419 	/* Check for CPU stalls, if enabled. */
3420 	check_cpu_stall(rsp, rdp);
3421 
3422 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
3423 	if (rcu_nohz_full_cpu(rsp))
3424 		return 0;
3425 
3426 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3427 	if (rcu_scheduler_fully_active &&
3428 	    rdp->qs_pending && !rdp->passed_quiesce &&
3429 	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
3430 		rdp->n_rp_qs_pending++;
3431 	} else if (rdp->qs_pending &&
3432 		   (rdp->passed_quiesce ||
3433 		    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
3434 		rdp->n_rp_report_qs++;
3435 		return 1;
3436 	}
3437 
3438 	/* Does this CPU have callbacks ready to invoke? */
3439 	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
3440 		rdp->n_rp_cb_ready++;
3441 		return 1;
3442 	}
3443 
3444 	/* Has RCU gone idle with this CPU needing another grace period? */
3445 	if (cpu_needs_another_gp(rsp, rdp)) {
3446 		rdp->n_rp_cpu_needs_gp++;
3447 		return 1;
3448 	}
3449 
3450 	/* Has another RCU grace period completed?  */
3451 	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
3452 		rdp->n_rp_gp_completed++;
3453 		return 1;
3454 	}
3455 
3456 	/* Has a new RCU grace period started? */
3457 	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
3458 	    unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
3459 		rdp->n_rp_gp_started++;
3460 		return 1;
3461 	}
3462 
3463 	/* Does this CPU need a deferred NOCB wakeup? */
3464 	if (rcu_nocb_need_deferred_wakeup(rdp)) {
3465 		rdp->n_rp_nocb_defer_wakeup++;
3466 		return 1;
3467 	}
3468 
3469 	/* nothing to do */
3470 	rdp->n_rp_need_nothing++;
3471 	return 0;
3472 }
3473 
3474 /*
3475  * Check to see if there is any immediate RCU-related work to be done
3476  * by the current CPU, returning 1 if so.  This function is part of the
3477  * RCU implementation; it is -not- an exported member of the RCU API.
3478  */
3479 static int rcu_pending(void)
3480 {
3481 	struct rcu_state *rsp;
3482 
3483 	for_each_rcu_flavor(rsp)
3484 		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3485 			return 1;
3486 	return 0;
3487 }
3488 
3489 /*
3490  * Return true if the specified CPU has any callback.  If all_lazy is
3491  * non-NULL, store an indication of whether all callbacks are lazy.
3492  * (If there are no callbacks, all of them are deemed to be lazy.)
3493  */
3494 static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3495 {
3496 	bool al = true;
3497 	bool hc = false;
3498 	struct rcu_data *rdp;
3499 	struct rcu_state *rsp;
3500 
3501 	for_each_rcu_flavor(rsp) {
3502 		rdp = this_cpu_ptr(rsp->rda);
3503 		if (!rdp->nxtlist)
3504 			continue;
3505 		hc = true;
3506 		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
3507 			al = false;
3508 			break;
3509 		}
3510 	}
3511 	if (all_lazy)
3512 		*all_lazy = al;
3513 	return hc;
3514 }
3515 
3516 /*
3517  * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
3518  * the compiler is expected to optimize this away.
3519  */
3520 static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3521 			       int cpu, unsigned long done)
3522 {
3523 	trace_rcu_barrier(rsp->name, s, cpu,
3524 			  atomic_read(&rsp->barrier_cpu_count), done);
3525 }
3526 
3527 /*
3528  * RCU callback function for _rcu_barrier().  If we are last, wake
3529  * up the task executing _rcu_barrier().
3530  */
3531 static void rcu_barrier_callback(struct rcu_head *rhp)
3532 {
3533 	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3534 	struct rcu_state *rsp = rdp->rsp;
3535 
3536 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3537 		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
3538 		complete(&rsp->barrier_completion);
3539 	} else {
3540 		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
3541 	}
3542 }
3543 
3544 /*
3545  * Called with preemption disabled, and from cross-cpu IRQ context.
3546  */
3547 static void rcu_barrier_func(void *type)
3548 {
3549 	struct rcu_state *rsp = type;
3550 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3551 
3552 	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
3553 	atomic_inc(&rsp->barrier_cpu_count);
3554 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
3555 }
3556 
3557 /*
3558  * Orchestrate the specified type of RCU barrier, waiting for all
3559  * RCU callbacks of the specified type to complete.
3560  */
3561 static void _rcu_barrier(struct rcu_state *rsp)
3562 {
3563 	int cpu;
3564 	struct rcu_data *rdp;
3565 	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
3566 	unsigned long snap_done;
3567 
3568 	_rcu_barrier_trace(rsp, "Begin", -1, snap);
3569 
3570 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3571 	mutex_lock(&rsp->barrier_mutex);
3572 
3573 	/*
3574 	 * Ensure that all prior references, including to ->n_barrier_done,
3575 	 * are ordered before the _rcu_barrier() machinery.
3576 	 */
3577 	smp_mb();  /* See above block comment. */
3578 
3579 	/*
3580 	 * Recheck ->n_barrier_done to see if others did our work for us.
3581 	 * This means checking ->n_barrier_done for an even-to-odd-to-even
3582 	 * transition.  The "if" expression below therefore rounds the old
3583 	 * value up to the next even number and adds two before comparing.
3584 	 */
3585 	snap_done = rsp->n_barrier_done;
3586 	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
3587 
3588 	/*
3589 	 * If the value in snap is odd, we needed to wait for the current
3590 	 * rcu_barrier() to complete, then wait for the next one, in other
3591 	 * words, we need the value of snap_done to be three larger than
3592 	 * the value of snap.  On the other hand, if the value in snap is
3593 	 * even, we only had to wait for the next rcu_barrier() to complete,
3594 	 * in other words, we need the value of snap_done to be only two
3595 	 * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
3596 	 * this for us (thank you, Linus!).
3597 	 */
3598 	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
3599 		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
3600 		smp_mb(); /* caller's subsequent code after above check. */
3601 		mutex_unlock(&rsp->barrier_mutex);
3602 		return;
3603 	}
3604 
3605 	/*
3606 	 * Increment ->n_barrier_done to avoid duplicate work.  Use
3607 	 * ACCESS_ONCE() to prevent the compiler from speculating
3608 	 * the increment to precede the early-exit check.
3609 	 */
3610 	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
3611 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
3612 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
3613 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
3614 
3615 	/*
3616 	 * Initialize the count to one rather than to zero in order to
3617 	 * avoid a too-soon return to zero in case of a short grace period
3618 	 * (or preemption of this task).  Exclude CPU-hotplug operations
3619 	 * to ensure that no offline CPU has callbacks queued.
3620 	 */
3621 	init_completion(&rsp->barrier_completion);
3622 	atomic_set(&rsp->barrier_cpu_count, 1);
3623 	get_online_cpus();
3624 
3625 	/*
3626 	 * Force each CPU with callbacks to register a new callback.
3627 	 * When that callback is invoked, we will know that all of the
3628 	 * corresponding CPU's preceding callbacks have been invoked.
3629 	 */
3630 	for_each_possible_cpu(cpu) {
3631 		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3632 			continue;
3633 		rdp = per_cpu_ptr(rsp->rda, cpu);
3634 		if (rcu_is_nocb_cpu(cpu)) {
3635 			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3636 				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
3637 						   rsp->n_barrier_done);
3638 			} else {
3639 				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3640 						   rsp->n_barrier_done);
3641 				smp_mb__before_atomic();
3642 				atomic_inc(&rsp->barrier_cpu_count);
3643 				__call_rcu(&rdp->barrier_head,
3644 					   rcu_barrier_callback, rsp, cpu, 0);
3645 			}
3646 		} else if (ACCESS_ONCE(rdp->qlen)) {
3647 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
3648 					   rsp->n_barrier_done);
3649 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3650 		} else {
3651 			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3652 					   rsp->n_barrier_done);
3653 		}
3654 	}
3655 	put_online_cpus();
3656 
3657 	/*
3658 	 * Now that we have an rcu_barrier_callback() callback on each
3659 	 * CPU, and thus each counted, remove the initial count.
3660 	 */
3661 	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3662 		complete(&rsp->barrier_completion);
3663 
3664 	/* Increment ->n_barrier_done to prevent duplicate work. */
3665 	smp_mb(); /* Keep increment after above mechanism. */
3666 	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
3667 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
3668 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
3669 	smp_mb(); /* Keep increment before caller's subsequent code. */
3670 
3671 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3672 	wait_for_completion(&rsp->barrier_completion);
3673 
3674 	/* Other rcu_barrier() invocations can now safely proceed. */
3675 	mutex_unlock(&rsp->barrier_mutex);
3676 }
3677 
3678 /**
3679  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
3680  */
3681 void rcu_barrier_bh(void)
3682 {
3683 	_rcu_barrier(&rcu_bh_state);
3684 }
3685 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3686 
3687 /**
3688  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
3689  */
3690 void rcu_barrier_sched(void)
3691 {
3692 	_rcu_barrier(&rcu_sched_state);
3693 }
3694 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3695 
3696 /*
3697  * Propagate ->qsinitmask bits up the rcu_node tree to account for the
3698  * first CPU in a given leaf rcu_node structure coming online.  The caller
3699  * must hold the corresponding leaf rcu_node ->lock with interrrupts
3700  * disabled.
3701  */
3702 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3703 {
3704 	long mask;
3705 	struct rcu_node *rnp = rnp_leaf;
3706 
3707 	for (;;) {
3708 		mask = rnp->grpmask;
3709 		rnp = rnp->parent;
3710 		if (rnp == NULL)
3711 			return;
3712 		raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
3713 		rnp->qsmaskinit |= mask;
3714 		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
3715 	}
3716 }
3717 
3718 /*
3719  * Do boot-time initialization of a CPU's per-CPU RCU data.
3720  */
3721 static void __init
3722 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3723 {
3724 	unsigned long flags;
3725 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3726 	struct rcu_node *rnp = rcu_get_root(rsp);
3727 
3728 	/* Set up local state, ensuring consistent view of global state. */
3729 	raw_spin_lock_irqsave(&rnp->lock, flags);
3730 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3731 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3732 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3733 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
3734 	rdp->cpu = cpu;
3735 	rdp->rsp = rsp;
3736 	rcu_boot_init_nocb_percpu_data(rdp);
3737 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
3738 }
3739 
3740 /*
3741  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
3742  * offline event can be happening at a given time.  Note also that we
3743  * can accept some slop in the rsp->completed access due to the fact
3744  * that this CPU cannot possibly have any RCU callbacks in flight yet.
3745  */
3746 static void
3747 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3748 {
3749 	unsigned long flags;
3750 	unsigned long mask;
3751 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3752 	struct rcu_node *rnp = rcu_get_root(rsp);
3753 
3754 	/* Set up local state, ensuring consistent view of global state. */
3755 	raw_spin_lock_irqsave(&rnp->lock, flags);
3756 	rdp->beenonline = 1;	 /* We have now been online. */
3757 	rdp->qlen_last_fqs_check = 0;
3758 	rdp->n_force_qs_snap = rsp->n_force_qs;
3759 	rdp->blimit = blimit;
3760 	if (!rdp->nxtlist)
3761 		init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
3762 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3763 	rcu_sysidle_init_percpu_data(rdp->dynticks);
3764 	atomic_set(&rdp->dynticks->dynticks,
3765 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
3766 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
3767 
3768 	/*
3769 	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
3770 	 * propagation up the rcu_node tree will happen at the beginning
3771 	 * of the next grace period.
3772 	 */
3773 	rnp = rdp->mynode;
3774 	mask = rdp->grpmask;
3775 	raw_spin_lock(&rnp->lock);		/* irqs already disabled. */
3776 	smp_mb__after_unlock_lock();
3777 	rnp->qsmaskinitnext |= mask;
3778 	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
3779 	rdp->completed = rnp->completed;
3780 	rdp->passed_quiesce = false;
3781 	rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
3782 	rdp->qs_pending = false;
3783 	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3784 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
3785 }
3786 
3787 static void rcu_prepare_cpu(int cpu)
3788 {
3789 	struct rcu_state *rsp;
3790 
3791 	for_each_rcu_flavor(rsp)
3792 		rcu_init_percpu_data(cpu, rsp);
3793 }
3794 
3795 /*
3796  * Handle CPU online/offline notification events.
3797  */
3798 int rcu_cpu_notify(struct notifier_block *self,
3799 		   unsigned long action, void *hcpu)
3800 {
3801 	long cpu = (long)hcpu;
3802 	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3803 	struct rcu_node *rnp = rdp->mynode;
3804 	struct rcu_state *rsp;
3805 
3806 	switch (action) {
3807 	case CPU_UP_PREPARE:
3808 	case CPU_UP_PREPARE_FROZEN:
3809 		rcu_prepare_cpu(cpu);
3810 		rcu_prepare_kthreads(cpu);
3811 		rcu_spawn_all_nocb_kthreads(cpu);
3812 		break;
3813 	case CPU_ONLINE:
3814 	case CPU_DOWN_FAILED:
3815 		rcu_boost_kthread_setaffinity(rnp, -1);
3816 		break;
3817 	case CPU_DOWN_PREPARE:
3818 		rcu_boost_kthread_setaffinity(rnp, cpu);
3819 		break;
3820 	case CPU_DYING:
3821 	case CPU_DYING_FROZEN:
3822 		for_each_rcu_flavor(rsp)
3823 			rcu_cleanup_dying_cpu(rsp);
3824 		break;
3825 	case CPU_DYING_IDLE:
3826 		for_each_rcu_flavor(rsp) {
3827 			rcu_cleanup_dying_idle_cpu(cpu, rsp);
3828 		}
3829 		break;
3830 	case CPU_DEAD:
3831 	case CPU_DEAD_FROZEN:
3832 	case CPU_UP_CANCELED:
3833 	case CPU_UP_CANCELED_FROZEN:
3834 		for_each_rcu_flavor(rsp) {
3835 			rcu_cleanup_dead_cpu(cpu, rsp);
3836 			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3837 		}
3838 		break;
3839 	default:
3840 		break;
3841 	}
3842 	return NOTIFY_OK;
3843 }
3844 
3845 static int rcu_pm_notify(struct notifier_block *self,
3846 			 unsigned long action, void *hcpu)
3847 {
3848 	switch (action) {
3849 	case PM_HIBERNATION_PREPARE:
3850 	case PM_SUSPEND_PREPARE:
3851 		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3852 			rcu_expedite_gp();
3853 		break;
3854 	case PM_POST_HIBERNATION:
3855 	case PM_POST_SUSPEND:
3856 		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3857 			rcu_unexpedite_gp();
3858 		break;
3859 	default:
3860 		break;
3861 	}
3862 	return NOTIFY_OK;
3863 }
3864 
3865 /*
3866  * Spawn the kthreads that handle each RCU flavor's grace periods.
3867  */
3868 static int __init rcu_spawn_gp_kthread(void)
3869 {
3870 	unsigned long flags;
3871 	int kthread_prio_in = kthread_prio;
3872 	struct rcu_node *rnp;
3873 	struct rcu_state *rsp;
3874 	struct sched_param sp;
3875 	struct task_struct *t;
3876 
3877 	/* Force priority into range. */
3878 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3879 		kthread_prio = 1;
3880 	else if (kthread_prio < 0)
3881 		kthread_prio = 0;
3882 	else if (kthread_prio > 99)
3883 		kthread_prio = 99;
3884 	if (kthread_prio != kthread_prio_in)
3885 		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3886 			 kthread_prio, kthread_prio_in);
3887 
3888 	rcu_scheduler_fully_active = 1;
3889 	for_each_rcu_flavor(rsp) {
3890 		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3891 		BUG_ON(IS_ERR(t));
3892 		rnp = rcu_get_root(rsp);
3893 		raw_spin_lock_irqsave(&rnp->lock, flags);
3894 		rsp->gp_kthread = t;
3895 		if (kthread_prio) {
3896 			sp.sched_priority = kthread_prio;
3897 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3898 		}
3899 		wake_up_process(t);
3900 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
3901 	}
3902 	rcu_spawn_nocb_kthreads();
3903 	rcu_spawn_boost_kthreads();
3904 	return 0;
3905 }
3906 early_initcall(rcu_spawn_gp_kthread);
3907 
3908 /*
3909  * This function is invoked towards the end of the scheduler's initialization
3910  * process.  Before this is called, the idle task might contain
3911  * RCU read-side critical sections (during which time, this idle
3912  * task is booting the system).  After this function is called, the
3913  * idle tasks are prohibited from containing RCU read-side critical
3914  * sections.  This function also enables RCU lockdep checking.
3915  */
3916 void rcu_scheduler_starting(void)
3917 {
3918 	WARN_ON(num_online_cpus() != 1);
3919 	WARN_ON(nr_context_switches() > 0);
3920 	rcu_scheduler_active = 1;
3921 }
3922 
3923 /*
3924  * Compute the per-level fanout, either using the exact fanout specified
3925  * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
3926  */
3927 static void __init rcu_init_levelspread(struct rcu_state *rsp)
3928 {
3929 	int i;
3930 
3931 	if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) {
3932 		rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
3933 		for (i = rcu_num_lvls - 2; i >= 0; i--)
3934 			rsp->levelspread[i] = CONFIG_RCU_FANOUT;
3935 	} else {
3936 		int ccur;
3937 		int cprv;
3938 
3939 		cprv = nr_cpu_ids;
3940 		for (i = rcu_num_lvls - 1; i >= 0; i--) {
3941 			ccur = rsp->levelcnt[i];
3942 			rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3943 			cprv = ccur;
3944 		}
3945 	}
3946 }
3947 
3948 /*
3949  * Helper function for rcu_init() that initializes one rcu_state structure.
3950  */
3951 static void __init rcu_init_one(struct rcu_state *rsp,
3952 		struct rcu_data __percpu *rda)
3953 {
3954 	static const char * const buf[] = {
3955 		"rcu_node_0",
3956 		"rcu_node_1",
3957 		"rcu_node_2",
3958 		"rcu_node_3" };  /* Match MAX_RCU_LVLS */
3959 	static const char * const fqs[] = {
3960 		"rcu_node_fqs_0",
3961 		"rcu_node_fqs_1",
3962 		"rcu_node_fqs_2",
3963 		"rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
3964 	static u8 fl_mask = 0x1;
3965 	int cpustride = 1;
3966 	int i;
3967 	int j;
3968 	struct rcu_node *rnp;
3969 
3970 	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3971 
3972 	/* Silence gcc 4.8 warning about array index out of range. */
3973 	if (rcu_num_lvls > RCU_NUM_LVLS)
3974 		panic("rcu_init_one: rcu_num_lvls overflow");
3975 
3976 	/* Initialize the level-tracking arrays. */
3977 
3978 	for (i = 0; i < rcu_num_lvls; i++)
3979 		rsp->levelcnt[i] = num_rcu_lvl[i];
3980 	for (i = 1; i < rcu_num_lvls; i++)
3981 		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3982 	rcu_init_levelspread(rsp);
3983 	rsp->flavor_mask = fl_mask;
3984 	fl_mask <<= 1;
3985 
3986 	/* Initialize the elements themselves, starting from the leaves. */
3987 
3988 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3989 		cpustride *= rsp->levelspread[i];
3990 		rnp = rsp->level[i];
3991 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
3992 			raw_spin_lock_init(&rnp->lock);
3993 			lockdep_set_class_and_name(&rnp->lock,
3994 						   &rcu_node_class[i], buf[i]);
3995 			raw_spin_lock_init(&rnp->fqslock);
3996 			lockdep_set_class_and_name(&rnp->fqslock,
3997 						   &rcu_fqs_class[i], fqs[i]);
3998 			rnp->gpnum = rsp->gpnum;
3999 			rnp->completed = rsp->completed;
4000 			rnp->qsmask = 0;
4001 			rnp->qsmaskinit = 0;
4002 			rnp->grplo = j * cpustride;
4003 			rnp->grphi = (j + 1) * cpustride - 1;
4004 			if (rnp->grphi >= nr_cpu_ids)
4005 				rnp->grphi = nr_cpu_ids - 1;
4006 			if (i == 0) {
4007 				rnp->grpnum = 0;
4008 				rnp->grpmask = 0;
4009 				rnp->parent = NULL;
4010 			} else {
4011 				rnp->grpnum = j % rsp->levelspread[i - 1];
4012 				rnp->grpmask = 1UL << rnp->grpnum;
4013 				rnp->parent = rsp->level[i - 1] +
4014 					      j / rsp->levelspread[i - 1];
4015 			}
4016 			rnp->level = i;
4017 			INIT_LIST_HEAD(&rnp->blkd_tasks);
4018 			rcu_init_one_nocb(rnp);
4019 		}
4020 	}
4021 
4022 	init_waitqueue_head(&rsp->gp_wq);
4023 	rnp = rsp->level[rcu_num_lvls - 1];
4024 	for_each_possible_cpu(i) {
4025 		while (i > rnp->grphi)
4026 			rnp++;
4027 		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
4028 		rcu_boot_init_percpu_data(i, rsp);
4029 	}
4030 	list_add(&rsp->flavors, &rcu_struct_flavors);
4031 }
4032 
4033 /*
4034  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4035  * replace the definitions in tree.h because those are needed to size
4036  * the ->node array in the rcu_state structure.
4037  */
4038 static void __init rcu_init_geometry(void)
4039 {
4040 	ulong d;
4041 	int i;
4042 	int j;
4043 	int n = nr_cpu_ids;
4044 	int rcu_capacity[MAX_RCU_LVLS + 1];
4045 
4046 	/*
4047 	 * Initialize any unspecified boot parameters.
4048 	 * The default values of jiffies_till_first_fqs and
4049 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4050 	 * value, which is a function of HZ, then adding one for each
4051 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4052 	 */
4053 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4054 	if (jiffies_till_first_fqs == ULONG_MAX)
4055 		jiffies_till_first_fqs = d;
4056 	if (jiffies_till_next_fqs == ULONG_MAX)
4057 		jiffies_till_next_fqs = d;
4058 
4059 	/* If the compile-time values are accurate, just leave. */
4060 	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
4061 	    nr_cpu_ids == NR_CPUS)
4062 		return;
4063 	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
4064 		rcu_fanout_leaf, nr_cpu_ids);
4065 
4066 	/*
4067 	 * Compute number of nodes that can be handled an rcu_node tree
4068 	 * with the given number of levels.  Setting rcu_capacity[0] makes
4069 	 * some of the arithmetic easier.
4070 	 */
4071 	rcu_capacity[0] = 1;
4072 	rcu_capacity[1] = rcu_fanout_leaf;
4073 	for (i = 2; i <= MAX_RCU_LVLS; i++)
4074 		rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
4075 
4076 	/*
4077 	 * The boot-time rcu_fanout_leaf parameter is only permitted
4078 	 * to increase the leaf-level fanout, not decrease it.  Of course,
4079 	 * the leaf-level fanout cannot exceed the number of bits in
4080 	 * the rcu_node masks.  Finally, the tree must be able to accommodate
4081 	 * the configured number of CPUs.  Complain and fall back to the
4082 	 * compile-time values if these limits are exceeded.
4083 	 */
4084 	if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
4085 	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
4086 	    n > rcu_capacity[MAX_RCU_LVLS]) {
4087 		WARN_ON(1);
4088 		return;
4089 	}
4090 
4091 	/* Calculate the number of rcu_nodes at each level of the tree. */
4092 	for (i = 1; i <= MAX_RCU_LVLS; i++)
4093 		if (n <= rcu_capacity[i]) {
4094 			for (j = 0; j <= i; j++)
4095 				num_rcu_lvl[j] =
4096 					DIV_ROUND_UP(n, rcu_capacity[i - j]);
4097 			rcu_num_lvls = i;
4098 			for (j = i + 1; j <= MAX_RCU_LVLS; j++)
4099 				num_rcu_lvl[j] = 0;
4100 			break;
4101 		}
4102 
4103 	/* Calculate the total number of rcu_node structures. */
4104 	rcu_num_nodes = 0;
4105 	for (i = 0; i <= MAX_RCU_LVLS; i++)
4106 		rcu_num_nodes += num_rcu_lvl[i];
4107 	rcu_num_nodes -= n;
4108 }
4109 
4110 void __init rcu_init(void)
4111 {
4112 	int cpu;
4113 
4114 	rcu_early_boot_tests();
4115 
4116 	rcu_bootup_announce();
4117 	rcu_init_geometry();
4118 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
4119 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
4120 	__rcu_init_preempt();
4121 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
4122 
4123 	/*
4124 	 * We don't need protection against CPU-hotplug here because
4125 	 * this is called early in boot, before either interrupts
4126 	 * or the scheduler are operational.
4127 	 */
4128 	cpu_notifier(rcu_cpu_notify, 0);
4129 	pm_notifier(rcu_pm_notify, 0);
4130 	for_each_online_cpu(cpu)
4131 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
4132 }
4133 
4134 #include "tree_plugin.h"
4135