xref: /openbmc/linux/kernel/rcu/tree.c (revision 93d90ad7)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, you can access it online at
16  * http://www.gnu.org/licenses/gpl-2.0.html.
17  *
18  * Copyright IBM Corporation, 2008
19  *
20  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21  *	    Manfred Spraul <manfred@colorfullife.com>
22  *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
23  *
24  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26  *
27  * For detailed explanation of Read-Copy Update mechanism see -
28  *	Documentation/RCU
29  */
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/smp.h>
35 #include <linux/rcupdate.h>
36 #include <linux/interrupt.h>
37 #include <linux/sched.h>
38 #include <linux/nmi.h>
39 #include <linux/atomic.h>
40 #include <linux/bitops.h>
41 #include <linux/export.h>
42 #include <linux/completion.h>
43 #include <linux/moduleparam.h>
44 #include <linux/module.h>
45 #include <linux/percpu.h>
46 #include <linux/notifier.h>
47 #include <linux/cpu.h>
48 #include <linux/mutex.h>
49 #include <linux/time.h>
50 #include <linux/kernel_stat.h>
51 #include <linux/wait.h>
52 #include <linux/kthread.h>
53 #include <linux/prefetch.h>
54 #include <linux/delay.h>
55 #include <linux/stop_machine.h>
56 #include <linux/random.h>
57 #include <linux/ftrace_event.h>
58 #include <linux/suspend.h>
59 
60 #include "tree.h"
61 #include "rcu.h"
62 
63 MODULE_ALIAS("rcutree");
64 #ifdef MODULE_PARAM_PREFIX
65 #undef MODULE_PARAM_PREFIX
66 #endif
67 #define MODULE_PARAM_PREFIX "rcutree."
68 
69 /* Data structures. */
70 
71 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
72 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
73 
74 /*
75  * In order to export the rcu_state name to the tracing tools, it
76  * needs to be added to the __tracepoint_string section.
77  * This requires defining a separate variable tp_<sname>_varname
78  * that points to the string being used, which allows
79  * the tracing userspace tools to map the string
80  * address back to the matching string.
81  */
82 #ifdef CONFIG_TRACING
83 # define DEFINE_RCU_TPS(sname) \
84 static char sname##_varname[] = #sname; \
85 static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
86 # define RCU_STATE_NAME(sname) sname##_varname
87 #else
88 # define DEFINE_RCU_TPS(sname)
89 # define RCU_STATE_NAME(sname) __stringify(sname)
90 #endif
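
/*
 * As a rough illustration of the macros above: under CONFIG_TRACING,
 * DEFINE_RCU_TPS(rcu_sched) expands to approximately
 *
 *	static char rcu_sched_varname[] = "rcu_sched";
 *	static const char *tp_rcu_sched_varname __used __tracepoint_string =
 *		rcu_sched_varname;
 *
 * and RCU_STATE_NAME(rcu_sched) then names rcu_sched_varname, so the
 * string sits at an address that the userspace tracing tools can resolve.
 * Without CONFIG_TRACING, RCU_STATE_NAME() is simply the stringified
 * flavor name.
 */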
91 
92 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
93 DEFINE_RCU_TPS(sname) \
94 struct rcu_state sname##_state = { \
95 	.level = { &sname##_state.node[0] }, \
96 	.call = cr, \
97 	.fqs_state = RCU_GP_IDLE, \
98 	.gpnum = 0UL - 300UL, \
99 	.completed = 0UL - 300UL, \
100 	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
101 	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
102 	.orphan_donetail = &sname##_state.orphan_donelist, \
103 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
104 	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
105 	.name = RCU_STATE_NAME(sname), \
106 	.abbr = sabbr, \
107 }; \
108 DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
109 
110 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
111 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
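
/*
 * Each invocation above thus defines one flavor-wide rcu_state structure
 * (rcu_sched_state, rcu_bh_state) together with its per-CPU rcu_data
 * instances (rcu_sched_data, rcu_bh_data).  Note that ->gpnum and
 * ->completed start at 0UL - 300UL rather than at zero, presumably so
 * that grace-period counter wrap is exercised shortly after boot.
 */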
112 
113 static struct rcu_state *rcu_state_p;
114 LIST_HEAD(rcu_struct_flavors);
115 
116 /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
117 static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
118 module_param(rcu_fanout_leaf, int, 0444);
119 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
120 static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
121 	NUM_RCU_LVL_0,
122 	NUM_RCU_LVL_1,
123 	NUM_RCU_LVL_2,
124 	NUM_RCU_LVL_3,
125 	NUM_RCU_LVL_4,
126 };
127 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
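
/*
 * As a sketch of the resulting geometry: assuming CONFIG_RCU_FANOUT_LEAF
 * is 16, a 64-CPU system gets a two-level tree with one root rcu_node
 * structure fanning out to four leaf rcu_node structures of 16 CPUs each,
 * so rcu_num_lvls == 2 and rcu_num_nodes == 5.
 */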
128 
129 /*
130  * The rcu_scheduler_active variable transitions from zero to one just
131  * before the first task is spawned.  So when this variable is zero, RCU
132  * can assume that there is but one task, allowing RCU to (for example)
133  * optimize synchronize_sched() to a simple barrier().  When this variable
134  * is one, RCU must actually do all the hard work required to detect real
135  * grace periods.  This variable is also used to suppress boot-time false
136  * positives from lockdep-RCU error checking.
137  */
138 int rcu_scheduler_active __read_mostly;
139 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
140 
141 /*
142  * The rcu_scheduler_fully_active variable transitions from zero to one
143  * during the early_initcall() processing, which is after the scheduler
144  * is capable of creating new tasks.  So RCU processing (for example,
145  * creating tasks for RCU priority boosting) must be delayed until after
146  * rcu_scheduler_fully_active transitions from zero to one.  We also
147  * currently delay invocation of any RCU callbacks until after this point.
148  *
149  * It might later prove better for people registering RCU callbacks during
150  * early boot to take responsibility for these callbacks, but one step at
151  * a time.
152  */
153 static int rcu_scheduler_fully_active __read_mostly;
154 
155 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
156 static void invoke_rcu_core(void);
157 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
158 
159 /*
160  * Track the rcutorture test sequence number and the update version
161  * number within a given test.  The rcutorture_testseq is incremented
162  * on every rcutorture module load and unload, so has an odd value
163  * when a test is running.  The rcutorture_vernum is set to zero
164  * when rcutorture starts and is incremented on each rcutorture update.
165  * These variables enable correlating rcutorture output with the
166  * RCU tracing information.
167  */
168 unsigned long rcutorture_testseq;
169 unsigned long rcutorture_vernum;
170 
171 /*
172  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
173  * permit this function to be invoked without holding the root rcu_node
174  * structure's ->lock, but of course results can be subject to change.
175  */
176 static int rcu_gp_in_progress(struct rcu_state *rsp)
177 {
178 	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
179 }
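
/*
 * In other words, ->gpnum is advanced when a grace period starts and
 * ->completed catches up only when that grace period ends, so the two
 * counters differ exactly while a grace period is in flight.
 */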
180 
181 /*
182  * Note a quiescent state.  Because we do not need to know
183  * how many quiescent states have passed, only whether there was at
184  * least one since the start of the grace period, this just sets a flag.
185  * The caller must have disabled preemption.
186  */
187 void rcu_sched_qs(void)
188 {
189 	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
190 		trace_rcu_grace_period(TPS("rcu_sched"),
191 				       __this_cpu_read(rcu_sched_data.gpnum),
192 				       TPS("cpuqs"));
193 		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
194 	}
195 }
196 
197 void rcu_bh_qs(void)
198 {
199 	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
200 		trace_rcu_grace_period(TPS("rcu_bh"),
201 				       __this_cpu_read(rcu_bh_data.gpnum),
202 				       TPS("cpuqs"));
203 		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
204 	}
205 }
206 
207 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
208 
209 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
210 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
211 	.dynticks = ATOMIC_INIT(1),
212 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
213 	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
214 	.dynticks_idle = ATOMIC_INIT(1),
215 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
216 };
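
/*
 * A note on the convention used by the ->dynticks counter above: it is
 * odd (hence the ATOMIC_INIT(1)) whenever the CPU might be running RCU
 * read-side critical sections, and even while the CPU is in an extended
 * quiescent state such as idle.  The "& 0x1" tests below rely on this
 * parity, and adding two (as in rcu_momentary_dyntick_idle()) emulates
 * one complete idle sojourn.
 */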
217 
218 /*
219  * Let the RCU core know that this CPU has gone through the scheduler,
220  * which is a quiescent state.  This is called when the need for a
221  * quiescent state is urgent, so we burn an atomic operation and full
222  * memory barriers to let the RCU core know about it, regardless of what
223  * this CPU might (or might not) do in the near future.
224  *
225  * We inform the RCU core by emulating a zero-duration dyntick-idle
226  * period, which we in turn do by incrementing the ->dynticks counter
227  * by two.
228  */
229 static void rcu_momentary_dyntick_idle(void)
230 {
231 	unsigned long flags;
232 	struct rcu_data *rdp;
233 	struct rcu_dynticks *rdtp;
234 	int resched_mask;
235 	struct rcu_state *rsp;
236 
237 	local_irq_save(flags);
238 
239 	/*
240 	 * Yes, we can lose flag-setting operations.  This is OK, because
241 	 * the flag will be set again after some delay.
242 	 */
243 	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
244 	raw_cpu_write(rcu_sched_qs_mask, 0);
245 
246 	/* Find the flavor that needs a quiescent state. */
247 	for_each_rcu_flavor(rsp) {
248 		rdp = raw_cpu_ptr(rsp->rda);
249 		if (!(resched_mask & rsp->flavor_mask))
250 			continue;
251 		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
252 		if (ACCESS_ONCE(rdp->mynode->completed) !=
253 		    ACCESS_ONCE(rdp->cond_resched_completed))
254 			continue;
255 
256 		/*
257 		 * Pretend to be momentarily idle for the quiescent state.
258 		 * This allows the grace-period kthread to record the
259 		 * quiescent state, with no need for this CPU to do anything
260 		 * further.
261 		 */
262 		rdtp = this_cpu_ptr(&rcu_dynticks);
263 		smp_mb__before_atomic(); /* Earlier stuff before QS. */
264 		atomic_add(2, &rdtp->dynticks);  /* QS. */
265 		smp_mb__after_atomic(); /* Later stuff after QS. */
266 		break;
267 	}
268 	local_irq_restore(flags);
269 }
270 
271 /*
272  * Note a context switch.  This is a quiescent state for RCU-sched,
273  * and requires special handling for preemptible RCU.
274  * The caller must have disabled preemption.
275  */
276 void rcu_note_context_switch(void)
277 {
278 	trace_rcu_utilization(TPS("Start context switch"));
279 	rcu_sched_qs();
280 	rcu_preempt_note_context_switch();
281 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
282 		rcu_momentary_dyntick_idle();
283 	trace_rcu_utilization(TPS("End context switch"));
284 }
285 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
286 
287 static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
288 static long qhimark = 10000;	/* If this many pending, ignore blimit. */
289 static long qlowmark = 100;	/* Once only this many pending, use blimit. */
290 
291 module_param(blimit, long, 0444);
292 module_param(qhimark, long, 0444);
293 module_param(qlowmark, long, 0444);
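
/*
 * Given the MODULE_PARAM_PREFIX above, these are set on the kernel boot
 * command line as, for example, "rcutree.blimit=20", and show up
 * read-only (0444) under /sys/module/rcutree/parameters/ at run time.
 */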
294 
295 static ulong jiffies_till_first_fqs = ULONG_MAX;
296 static ulong jiffies_till_next_fqs = ULONG_MAX;
297 
298 module_param(jiffies_till_first_fqs, ulong, 0644);
299 module_param(jiffies_till_next_fqs, ulong, 0644);
300 
301 /*
302  * How long the grace period must be before we start recruiting
303  * quiescent-state help from rcu_note_context_switch().
304  */
305 static ulong jiffies_till_sched_qs = HZ / 20;
306 module_param(jiffies_till_sched_qs, ulong, 0644);
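
/* For example, with HZ=1000 the default is 50 jiffies, or roughly 50ms. */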
307 
308 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
309 				  struct rcu_data *rdp);
310 static void force_qs_rnp(struct rcu_state *rsp,
311 			 int (*f)(struct rcu_data *rsp, bool *isidle,
312 				  unsigned long *maxj),
313 			 bool *isidle, unsigned long *maxj);
314 static void force_quiescent_state(struct rcu_state *rsp);
315 static int rcu_pending(void);
316 
317 /*
318  * Return the number of RCU-sched batches processed thus far for debug & stats.
319  */
320 long rcu_batches_completed_sched(void)
321 {
322 	return rcu_sched_state.completed;
323 }
324 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
325 
326 /*
327  * Return the number of RCU BH batches processed thus far for debug & stats.
328  */
329 long rcu_batches_completed_bh(void)
330 {
331 	return rcu_bh_state.completed;
332 }
333 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
334 
335 /*
336  * Force a quiescent state.
337  */
338 void rcu_force_quiescent_state(void)
339 {
340 	force_quiescent_state(rcu_state_p);
341 }
342 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
343 
344 /*
345  * Force a quiescent state for RCU BH.
346  */
347 void rcu_bh_force_quiescent_state(void)
348 {
349 	force_quiescent_state(&rcu_bh_state);
350 }
351 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
352 
353 /*
354  * Show the state of the grace-period kthreads.
355  */
356 void show_rcu_gp_kthreads(void)
357 {
358 	struct rcu_state *rsp;
359 
360 	for_each_rcu_flavor(rsp) {
361 		pr_info("%s: wait state: %d ->state: %#lx\n",
362 			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
363 		/* sched_show_task(rsp->gp_kthread); */
364 	}
365 }
366 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
367 
368 /*
369  * Record the number of times rcutorture tests have been initiated and
370  * terminated.  This information allows the debugfs tracing stats to be
371  * correlated to the rcutorture messages, even when the rcutorture module
372  * is being repeatedly loaded and unloaded.  In other words, we cannot
373  * store this state in rcutorture itself.
374  */
375 void rcutorture_record_test_transition(void)
376 {
377 	rcutorture_testseq++;
378 	rcutorture_vernum = 0;
379 }
380 EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
381 
382 /*
383  * Send along grace-period-related data for rcutorture diagnostics.
384  */
385 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
386 			    unsigned long *gpnum, unsigned long *completed)
387 {
388 	struct rcu_state *rsp = NULL;
389 
390 	switch (test_type) {
391 	case RCU_FLAVOR:
392 		rsp = rcu_state_p;
393 		break;
394 	case RCU_BH_FLAVOR:
395 		rsp = &rcu_bh_state;
396 		break;
397 	case RCU_SCHED_FLAVOR:
398 		rsp = &rcu_sched_state;
399 		break;
400 	default:
401 		break;
402 	}
403 	if (rsp != NULL) {
404 		*flags = ACCESS_ONCE(rsp->gp_flags);
405 		*gpnum = ACCESS_ONCE(rsp->gpnum);
406 		*completed = ACCESS_ONCE(rsp->completed);
407 		return;
408 	}
409 	*flags = 0;
410 	*gpnum = 0;
411 	*completed = 0;
412 }
413 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
414 
415 /*
416  * Record the number of writer passes through the current rcutorture test.
417  * This is also used to correlate debugfs tracing stats with the rcutorture
418  * messages.
419  */
420 void rcutorture_record_progress(unsigned long vernum)
421 {
422 	rcutorture_vernum++;
423 }
424 EXPORT_SYMBOL_GPL(rcutorture_record_progress);
425 
426 /*
427  * Force a quiescent state for RCU-sched.
428  */
429 void rcu_sched_force_quiescent_state(void)
430 {
431 	force_quiescent_state(&rcu_sched_state);
432 }
433 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
434 
435 /*
436  * Does the CPU have callbacks ready to be invoked?
437  */
438 static int
439 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
440 {
441 	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
442 	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
443 }
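
/*
 * A sketch of the segmented callback list consulted above and throughout
 * this file: ->nxtlist is a single linked list of callbacks, and
 * ->nxttail[] holds tail pointers that partition it into sublists:
 * RCU_DONE_TAIL ends the callbacks whose grace period has completed,
 * RCU_WAIT_TAIL those waiting for the current grace period,
 * RCU_NEXT_READY_TAIL those assigned to the next grace period, and
 * RCU_NEXT_TAIL the newly arrived callbacks not yet assigned to any
 * grace period.  An empty sublist simply shares its tail pointer with
 * the preceding sublist.
 */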
444 
445 /*
446  * Return the root node of the specified rcu_state structure.
447  */
448 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
449 {
450 	return &rsp->node[0];
451 }
452 
453 /*
454  * Is there any need for future grace periods?
455  * Interrupts must be disabled.  If the caller does not hold the root
456  * rcu_node structure's ->lock, the results are advisory only.
457  */
458 static int rcu_future_needs_gp(struct rcu_state *rsp)
459 {
460 	struct rcu_node *rnp = rcu_get_root(rsp);
461 	int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
462 	int *fp = &rnp->need_future_gp[idx];
463 
464 	return ACCESS_ONCE(*fp);
465 }
466 
467 /*
468  * Does the current CPU require a not-yet-started grace period?
469  * The caller must have disabled interrupts to prevent races with
470  * normal callback registry.
471  */
472 static int
473 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
474 {
475 	int i;
476 
477 	if (rcu_gp_in_progress(rsp))
478 		return 0;  /* No, a grace period is already in progress. */
479 	if (rcu_future_needs_gp(rsp))
480 		return 1;  /* Yes, a no-CBs CPU needs one. */
481 	if (!rdp->nxttail[RCU_NEXT_TAIL])
482 		return 0;  /* No, this is a no-CBs (or offline) CPU. */
483 	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
484 		return 1;  /* Yes, this CPU has newly registered callbacks. */
485 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
486 		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
487 		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
488 				 rdp->nxtcompleted[i]))
489 			return 1;  /* Yes, CBs for future grace period. */
490 	return 0; /* No grace period needed. */
491 }
492 
493 /*
494  * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
495  *
496  * If the new value of the ->dynticks_nesting counter is zero,
497  * we really have entered idle, and must do the appropriate accounting.
498  * The caller must have disabled interrupts.
499  */
500 static void rcu_eqs_enter_common(long long oldval, bool user)
501 {
502 	struct rcu_state *rsp;
503 	struct rcu_data *rdp;
504 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
505 
506 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
507 	if (!user && !is_idle_task(current)) {
508 		struct task_struct *idle __maybe_unused =
509 			idle_task(smp_processor_id());
510 
511 		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
512 		ftrace_dump(DUMP_ORIG);
513 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
514 			  current->pid, current->comm,
515 			  idle->pid, idle->comm); /* must be idle task! */
516 	}
517 	for_each_rcu_flavor(rsp) {
518 		rdp = this_cpu_ptr(rsp->rda);
519 		do_nocb_deferred_wakeup(rdp);
520 	}
521 	rcu_prepare_for_idle();
522 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
523 	smp_mb__before_atomic();  /* See above. */
524 	atomic_inc(&rdtp->dynticks);
525 	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
526 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
527 	rcu_dynticks_task_enter();
528 
529 	/*
530 	 * It is illegal to enter an extended quiescent state while
531 	 * in an RCU read-side critical section.
532 	 */
533 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
534 			   "Illegal idle entry in RCU read-side critical section.");
535 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
536 			   "Illegal idle entry in RCU-bh read-side critical section.");
537 	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
538 			   "Illegal idle entry in RCU-sched read-side critical section.");
539 }
540 
541 /*
542  * Enter an RCU extended quiescent state, which can be either the
543  * idle loop or adaptive-tickless usermode execution.
544  */
545 static void rcu_eqs_enter(bool user)
546 {
547 	long long oldval;
548 	struct rcu_dynticks *rdtp;
549 
550 	rdtp = this_cpu_ptr(&rcu_dynticks);
551 	oldval = rdtp->dynticks_nesting;
552 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
553 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
554 		rdtp->dynticks_nesting = 0;
555 		rcu_eqs_enter_common(oldval, user);
556 	} else {
557 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
558 	}
559 }
560 
561 /**
562  * rcu_idle_enter - inform RCU that current CPU is entering idle
563  *
564  * Enter idle mode, in other words, -leave- the mode in which RCU
565  * read-side critical sections can occur.  (Though RCU read-side
566  * critical sections can occur in irq handlers in idle, a possibility
567  * handled by irq_enter() and irq_exit().)
568  *
569  * We crowbar the ->dynticks_nesting field to zero to allow for
570  * the possibility of usermode upcalls having messed up our count
571  * of interrupt nesting level during the prior busy period.
572  */
573 void rcu_idle_enter(void)
574 {
575 	unsigned long flags;
576 
577 	local_irq_save(flags);
578 	rcu_eqs_enter(false);
579 	rcu_sysidle_enter(0);
580 	local_irq_restore(flags);
581 }
582 EXPORT_SYMBOL_GPL(rcu_idle_enter);
583 
584 #ifdef CONFIG_RCU_USER_QS
585 /**
586  * rcu_user_enter - inform RCU that we are resuming userspace.
587  *
588  * Enter RCU idle mode right before resuming userspace.  No use of RCU
589  * is permitted between this call and rcu_user_exit(). This way the
590  * CPU doesn't need to maintain the tick for RCU maintenance purposes
591  * when the CPU runs in userspace.
592  */
593 void rcu_user_enter(void)
594 {
595 	rcu_eqs_enter(1);
596 }
597 #endif /* CONFIG_RCU_USER_QS */
598 
599 /**
600  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
601  *
602  * Exit from an interrupt handler, which might possibly result in entering
603  * idle mode, in other words, leaving the mode in which read-side critical
604  * sections can occur.
605  *
606  * This code assumes that the idle loop never does anything that might
607  * result in unbalanced calls to irq_enter() and irq_exit().  If your
608  * architecture violates this assumption, RCU will give you what you
609  * deserve, good and hard.  But very infrequently and irreproducibly.
610  *
611  * Use things like work queues to work around this limitation.
612  *
613  * You have been warned.
614  */
615 void rcu_irq_exit(void)
616 {
617 	unsigned long flags;
618 	long long oldval;
619 	struct rcu_dynticks *rdtp;
620 
621 	local_irq_save(flags);
622 	rdtp = this_cpu_ptr(&rcu_dynticks);
623 	oldval = rdtp->dynticks_nesting;
624 	rdtp->dynticks_nesting--;
625 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
626 	if (rdtp->dynticks_nesting)
627 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
628 	else
629 		rcu_eqs_enter_common(oldval, true);
630 	rcu_sysidle_enter(1);
631 	local_irq_restore(flags);
632 }
633 
634 /*
635  * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
636  *
637  * If the old value of the ->dynticks_nesting counter was zero,
638  * we really have exited idle, and must do the appropriate accounting.
639  * The caller must have disabled interrupts.
640  */
641 static void rcu_eqs_exit_common(long long oldval, int user)
642 {
643 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
644 
645 	rcu_dynticks_task_exit();
646 	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
647 	atomic_inc(&rdtp->dynticks);
648 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
649 	smp_mb__after_atomic();  /* See above. */
650 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
651 	rcu_cleanup_after_idle();
652 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
653 	if (!user && !is_idle_task(current)) {
654 		struct task_struct *idle __maybe_unused =
655 			idle_task(smp_processor_id());
656 
657 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
658 				  oldval, rdtp->dynticks_nesting);
659 		ftrace_dump(DUMP_ORIG);
660 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
661 			  current->pid, current->comm,
662 			  idle->pid, idle->comm); /* must be idle task! */
663 	}
664 }
665 
666 /*
667  * Exit an RCU extended quiescent state, which can be either the
668  * idle loop or adaptive-tickless usermode execution.
669  */
670 static void rcu_eqs_exit(bool user)
671 {
672 	struct rcu_dynticks *rdtp;
673 	long long oldval;
674 
675 	rdtp = this_cpu_ptr(&rcu_dynticks);
676 	oldval = rdtp->dynticks_nesting;
677 	WARN_ON_ONCE(oldval < 0);
678 	if (oldval & DYNTICK_TASK_NEST_MASK) {
679 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
680 	} else {
681 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
682 		rcu_eqs_exit_common(oldval, user);
683 	}
684 }
685 
686 /**
687  * rcu_idle_exit - inform RCU that current CPU is leaving idle
688  *
689  * Exit idle mode, in other words, -enter- the mode in which RCU
690  * read-side critical sections can occur.
691  *
692  * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
693  * allow for the possibility of usermode upcalls messing up our count
694  * of interrupt nesting level during the busy period that is just
695  * now starting.
696  */
697 void rcu_idle_exit(void)
698 {
699 	unsigned long flags;
700 
701 	local_irq_save(flags);
702 	rcu_eqs_exit(false);
703 	rcu_sysidle_exit(0);
704 	local_irq_restore(flags);
705 }
706 EXPORT_SYMBOL_GPL(rcu_idle_exit);
707 
708 #ifdef CONFIG_RCU_USER_QS
709 /**
710  * rcu_user_exit - inform RCU that we are exiting userspace.
711  *
712  * Exit RCU idle mode while entering the kernel because it can
713  * run an RCU read-side critical section at any time.
714  */
715 void rcu_user_exit(void)
716 {
717 	rcu_eqs_exit(1);
718 }
719 #endif /* CONFIG_RCU_USER_QS */
720 
721 /**
722  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
723  *
724  * Enter an interrupt handler, which might possibly result in exiting
725  * idle mode, in other words, entering the mode in which read-side critical
726  * sections can occur.
727  *
728  * Note that the Linux kernel is fully capable of entering an interrupt
729  * handler that it never exits, for example when doing upcalls to
730  * user mode!  This code assumes that the idle loop never does upcalls to
731  * user mode.  If your architecture does do upcalls from the idle loop (or
732  * does anything else that results in unbalanced calls to the irq_enter()
733  * and irq_exit() functions), RCU will give you what you deserve, good
734  * and hard.  But very infrequently and irreproducibly.
735  *
736  * Use things like work queues to work around this limitation.
737  *
738  * You have been warned.
739  */
740 void rcu_irq_enter(void)
741 {
742 	unsigned long flags;
743 	struct rcu_dynticks *rdtp;
744 	long long oldval;
745 
746 	local_irq_save(flags);
747 	rdtp = this_cpu_ptr(&rcu_dynticks);
748 	oldval = rdtp->dynticks_nesting;
749 	rdtp->dynticks_nesting++;
750 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
751 	if (oldval)
752 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
753 	else
754 		rcu_eqs_exit_common(oldval, true);
755 	rcu_sysidle_exit(1);
756 	local_irq_restore(flags);
757 }
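
/*
 * A rough sketch of the ->dynticks_nesting bookkeeping for an interrupt
 * taken from the idle loop:
 *
 *	rcu_idle_enter()	->dynticks_nesting forced to 0, RCU stops watching
 *	  rcu_irq_enter()	0 -> 1, rcu_eqs_exit_common(), RCU watching
 *	  rcu_irq_exit()	1 -> 0, rcu_eqs_enter_common(), RCU idle again
 *	rcu_idle_exit()		restores DYNTICK_TASK_EXIT_IDLE
 */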
758 
759 /**
760  * rcu_nmi_enter - inform RCU of entry to NMI context
761  *
762  * If the CPU was idle with dynamic ticks active, and there is no
763  * irq handler running, this updates rdtp->dynticks_nmi to let the
764  * RCU grace-period handling know that the CPU is active.
765  */
766 void rcu_nmi_enter(void)
767 {
768 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
769 
770 	if (rdtp->dynticks_nmi_nesting == 0 &&
771 	    (atomic_read(&rdtp->dynticks) & 0x1))
772 		return;
773 	rdtp->dynticks_nmi_nesting++;
774 	smp_mb__before_atomic();  /* Force delay from prior write. */
775 	atomic_inc(&rdtp->dynticks);
776 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
777 	smp_mb__after_atomic();  /* See above. */
778 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
779 }
780 
781 /**
782  * rcu_nmi_exit - inform RCU of exit from NMI context
783  *
784  * If the CPU was idle with dynamic ticks active, and there is no
785  * irq handler running, this updates rdtp->dynticks_nmi to let the
786  * RCU grace-period handling know that the CPU is no longer active.
787  */
788 void rcu_nmi_exit(void)
789 {
790 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
791 
792 	if (rdtp->dynticks_nmi_nesting == 0 ||
793 	    --rdtp->dynticks_nmi_nesting != 0)
794 		return;
795 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
796 	smp_mb__before_atomic();  /* See above. */
797 	atomic_inc(&rdtp->dynticks);
798 	smp_mb__after_atomic();  /* Force delay to next write. */
799 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
800 }
801 
802 /**
803  * __rcu_is_watching - are RCU read-side critical sections safe?
804  *
805  * Return true if RCU is watching the running CPU, which means that
806  * this CPU can safely enter RCU read-side critical sections.  Unlike
807  * rcu_is_watching(), the caller of __rcu_is_watching() must have at
808  * least disabled preemption.
809  */
810 bool notrace __rcu_is_watching(void)
811 {
812 	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
813 }
814 
815 /**
816  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
817  *
818  * If the current CPU is in its idle loop and is neither in an interrupt
819  * nor an NMI handler, return false; otherwise return true.
820  */
821 bool notrace rcu_is_watching(void)
822 {
823 	bool ret;
824 
825 	preempt_disable();
826 	ret = __rcu_is_watching();
827 	preempt_enable();
828 	return ret;
829 }
830 EXPORT_SYMBOL_GPL(rcu_is_watching);
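
/*
 * As a usage sketch, code that can be reached from the idle loop (for
 * example event tracing) can guard its RCU usage with something like:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		...
 *		rcu_read_unlock();
 *	}
 *
 * because entering an RCU read-side critical section from an extended
 * quiescent state is illegal.
 */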
831 
832 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
833 
834 /*
835  * Is the current CPU online?  Disable preemption to avoid false positives
836  * that could otherwise happen due to the current CPU number being sampled,
837  * this task being preempted, its old CPU being taken offline, resuming
838  * on some other CPU, then determining that its old CPU is now offline.
839  * It is OK to use RCU on an offline processor during initial boot, hence
840  * the check for rcu_scheduler_fully_active.  Note also that it is OK
841  * for a CPU coming online to use RCU for one jiffy prior to marking itself
842  * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
843  * offline to continue to use RCU for one jiffy after marking itself
844  * offline in the cpu_online_mask.  This leniency is necessary given the
845  * non-atomic nature of the online and offline processing, for example,
846  * the fact that a CPU enters the scheduler after completing the CPU_DYING
847  * notifiers.
848  *
849  * This is also why RCU internally marks CPUs online during the
850  * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
851  *
852  * Disable checking if in an NMI handler because we cannot safely report
853  * errors from NMI handlers anyway.
854  */
855 bool rcu_lockdep_current_cpu_online(void)
856 {
857 	struct rcu_data *rdp;
858 	struct rcu_node *rnp;
859 	bool ret;
860 
861 	if (in_nmi())
862 		return true;
863 	preempt_disable();
864 	rdp = this_cpu_ptr(&rcu_sched_data);
865 	rnp = rdp->mynode;
866 	ret = (rdp->grpmask & rnp->qsmaskinit) ||
867 	      !rcu_scheduler_fully_active;
868 	preempt_enable();
869 	return ret;
870 }
871 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
872 
873 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
874 
875 /**
876  * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
877  *
878  * If the current CPU is idle or running at a first-level (not nested)
879  * interrupt from idle, return true.  The caller must have at least
880  * disabled preemption.
881  */
882 static int rcu_is_cpu_rrupt_from_idle(void)
883 {
884 	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
885 }
886 
887 /*
888  * Snapshot the specified CPU's dynticks counter so that we can later
889  * credit it with an implicit quiescent state.  Return 1 if this CPU
890  * is in dynticks idle mode, which is an extended quiescent state.
891  */
892 static int dyntick_save_progress_counter(struct rcu_data *rdp,
893 					 bool *isidle, unsigned long *maxj)
894 {
895 	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
896 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
897 	if ((rdp->dynticks_snap & 0x1) == 0) {
898 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
899 		return 1;
900 	} else {
901 		return 0;
902 	}
903 }
904 
905 /*
906  * This function really isn't for public consumption, but RCU is special in
907  * that context switches can allow the state machine to make progress.
908  */
909 extern void resched_cpu(int cpu);
910 
911 /*
912  * Return true if the specified CPU has passed through a quiescent
913  * state by virtue of being in or having passed through a dynticks
914  * idle state since the last call to dyntick_save_progress_counter()
915  * for this same CPU, or by virtue of having been offline.
916  */
917 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
918 				    bool *isidle, unsigned long *maxj)
919 {
920 	unsigned int curr;
921 	int *rcrmp;
922 	unsigned int snap;
923 
924 	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
925 	snap = (unsigned int)rdp->dynticks_snap;
926 
927 	/*
928 	 * If the CPU passed through or entered a dynticks idle phase with
929 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
930 	 * already acknowledged the request to pass through a quiescent
931 	 * state.  Either way, that CPU cannot possibly be in an RCU
932 	 * read-side critical section that started before the beginning
933 	 * of the current RCU grace period.
934 	 */
935 	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
936 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
937 		rdp->dynticks_fqs++;
938 		return 1;
939 	}
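
	/*
	 * Worked example of the test above: if "snap" was 9 (odd, so the
	 * CPU was not idle when sampled), then either "curr" now being
	 * even (idle right now) or "curr" being at least 11 (the counter
	 * advanced by two or more, hence at least one full idle sojourn)
	 * proves a quiescent state.
	 */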
940 
941 	/*
942 	 * Check for the CPU being offline, but only if the grace period
943 	 * is old enough.  We don't need to worry about the CPU changing
944 	 * state: If we see it offline even once, it has been through a
945 	 * quiescent state.
946 	 *
947 	 * The reason for insisting that the grace period be at least
948 	 * one jiffy old is that CPUs that are not quite online and that
949 	 * have just gone offline can still execute RCU read-side critical
950 	 * sections.
951 	 */
952 	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
953 		return 0;  /* Grace period is not old enough. */
954 	barrier();
955 	if (cpu_is_offline(rdp->cpu)) {
956 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
957 		rdp->offline_fqs++;
958 		return 1;
959 	}
960 
961 	/*
962 	 * A CPU running for an extended time within the kernel can
963 	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
964 	 * even context-switching back and forth between a pair of
965 	 * in-kernel CPU-bound tasks cannot advance grace periods.
966 	 * So if the grace period is old enough, make the CPU pay attention.
967 	 * Note that the unsynchronized assignments to the per-CPU
968 	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
969 	 * bits can be lost, but they will be set again on the next
970 	 * force-quiescent-state pass.  So lost bit sets do not result
971 	 * in incorrect behavior, merely in a grace period lasting
972 	 * a few jiffies longer than it might otherwise.  Because
973 	 * there are at most four threads involved, and because the
974 	 * updates are only once every few jiffies, the probability of
975 	 * lossage (and thus of slight grace-period extension) is
976 	 * quite low.
977 	 *
978 	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
979 	 * is set too high, we override with half of the RCU CPU stall
980 	 * warning delay.
981 	 */
982 	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
983 	if (ULONG_CMP_GE(jiffies,
984 			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
985 	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
986 		if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
987 			ACCESS_ONCE(rdp->cond_resched_completed) =
988 				ACCESS_ONCE(rdp->mynode->completed);
989 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
990 			ACCESS_ONCE(*rcrmp) =
991 				ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
992 			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
993 			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
994 		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
995 			/* Time to beat on that CPU again! */
996 			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
997 			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
998 		}
999 	}
1000 
1001 	return 0;
1002 }
1003 
1004 static void record_gp_stall_check_time(struct rcu_state *rsp)
1005 {
1006 	unsigned long j = jiffies;
1007 	unsigned long j1;
1008 
1009 	rsp->gp_start = j;
1010 	smp_wmb(); /* Record start time before stall time. */
1011 	j1 = rcu_jiffies_till_stall_check();
1012 	ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
1013 	rsp->jiffies_resched = j + j1 / 2;
1014 }
1015 
1016 /*
1017  * Dump stacks of all tasks running on stalled CPUs.
1018  */
1019 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1020 {
1021 	int cpu;
1022 	unsigned long flags;
1023 	struct rcu_node *rnp;
1024 
1025 	rcu_for_each_leaf_node(rsp, rnp) {
1026 		raw_spin_lock_irqsave(&rnp->lock, flags);
1027 		if (rnp->qsmask != 0) {
1028 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1029 				if (rnp->qsmask & (1UL << cpu))
1030 					dump_cpu_task(rnp->grplo + cpu);
1031 		}
1032 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1033 	}
1034 }
1035 
1036 static void print_other_cpu_stall(struct rcu_state *rsp)
1037 {
1038 	int cpu;
1039 	long delta;
1040 	unsigned long flags;
1041 	int ndetected = 0;
1042 	struct rcu_node *rnp = rcu_get_root(rsp);
1043 	long totqlen = 0;
1044 
1045 	/* Only let one CPU complain about others per time interval. */
1046 
1047 	raw_spin_lock_irqsave(&rnp->lock, flags);
1048 	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
1049 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1050 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1051 		return;
1052 	}
1053 	ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
1054 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1055 
1056 	/*
1057 	 * OK, time to rat on our buddy...
1058 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
1059 	 * RCU CPU stall warnings.
1060 	 */
1061 	pr_err("INFO: %s detected stalls on CPUs/tasks:",
1062 	       rsp->name);
1063 	print_cpu_stall_info_begin();
1064 	rcu_for_each_leaf_node(rsp, rnp) {
1065 		raw_spin_lock_irqsave(&rnp->lock, flags);
1066 		ndetected += rcu_print_task_stall(rnp);
1067 		if (rnp->qsmask != 0) {
1068 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1069 				if (rnp->qsmask & (1UL << cpu)) {
1070 					print_cpu_stall_info(rsp,
1071 							     rnp->grplo + cpu);
1072 					ndetected++;
1073 				}
1074 		}
1075 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1076 	}
1077 
1078 	/*
1079 	 * Now rat on any tasks that got kicked up to the root rcu_node
1080 	 * due to CPU offlining.
1081 	 */
1082 	rnp = rcu_get_root(rsp);
1083 	raw_spin_lock_irqsave(&rnp->lock, flags);
1084 	ndetected += rcu_print_task_stall(rnp);
1085 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1086 
1087 	print_cpu_stall_info_end();
1088 	for_each_possible_cpu(cpu)
1089 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1090 	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1091 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
1092 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
1093 	if (ndetected == 0)
1094 		pr_err("INFO: Stall ended before state dump start\n");
1095 	else
1096 		rcu_dump_cpu_stacks(rsp);
1097 
1098 	/* Complain about tasks blocking the grace period. */
1099 
1100 	rcu_print_detail_task_stall(rsp);
1101 
1102 	force_quiescent_state(rsp);  /* Kick them all. */
1103 }
1104 
1105 static void print_cpu_stall(struct rcu_state *rsp)
1106 {
1107 	int cpu;
1108 	unsigned long flags;
1109 	struct rcu_node *rnp = rcu_get_root(rsp);
1110 	long totqlen = 0;
1111 
1112 	/*
1113 	 * OK, time to rat on ourselves...
1114 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
1115 	 * RCU CPU stall warnings.
1116 	 */
1117 	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1118 	print_cpu_stall_info_begin();
1119 	print_cpu_stall_info(rsp, smp_processor_id());
1120 	print_cpu_stall_info_end();
1121 	for_each_possible_cpu(cpu)
1122 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1123 	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1124 		jiffies - rsp->gp_start,
1125 		(long)rsp->gpnum, (long)rsp->completed, totqlen);
1126 	rcu_dump_cpu_stacks(rsp);
1127 
1128 	raw_spin_lock_irqsave(&rnp->lock, flags);
1129 	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
1130 		ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
1131 				     3 * rcu_jiffies_till_stall_check() + 3;
1132 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1133 
1134 	/*
1135 	 * Attempt to revive the RCU machinery by forcing a context switch.
1136 	 *
1137 	 * A context switch would normally allow the RCU state machine to make
1138 	 * progress and it could be we're stuck in kernel space without context
1139 	 * switches for an entirely unreasonable amount of time.
1140 	 */
1141 	resched_cpu(smp_processor_id());
1142 }
1143 
1144 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1145 {
1146 	unsigned long completed;
1147 	unsigned long gpnum;
1148 	unsigned long gps;
1149 	unsigned long j;
1150 	unsigned long js;
1151 	struct rcu_node *rnp;
1152 
1153 	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
1154 		return;
1155 	j = jiffies;
1156 
1157 	/*
1158 	 * Lots of memory barriers to reject false positives.
1159 	 *
1160 	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1161 	 * then rsp->gp_start, and finally rsp->completed.  These values
1162 	 * are updated in the opposite order with memory barriers (or
1163 	 * equivalent) during grace-period initialization and cleanup.
1164 	 * Now, a false positive can occur if we get a new value of
1165 	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
1166 	 * the memory barriers, the only way that this can happen is if one
1167 	 * grace period ends and another starts between these two fetches.
1168 	 * Detect this by comparing rsp->completed with the previous fetch
1169 	 * from rsp->gpnum.
1170 	 *
1171 	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
1172 	 * and rsp->gp_start suffice to forestall false positives.
1173 	 */
1174 	gpnum = ACCESS_ONCE(rsp->gpnum);
1175 	smp_rmb(); /* Pick up ->gpnum first... */
1176 	js = ACCESS_ONCE(rsp->jiffies_stall);
1177 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
1178 	gps = ACCESS_ONCE(rsp->gp_start);
1179 	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
1180 	completed = ACCESS_ONCE(rsp->completed);
1181 	if (ULONG_CMP_GE(completed, gpnum) ||
1182 	    ULONG_CMP_LT(j, js) ||
1183 	    ULONG_CMP_GE(gps, js))
1184 		return; /* No stall or GP completed since entering function. */
1185 	rnp = rdp->mynode;
1186 	if (rcu_gp_in_progress(rsp) &&
1187 	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
1188 
1189 		/* We haven't checked in, so go dump stack. */
1190 		print_cpu_stall(rsp);
1191 
1192 	} else if (rcu_gp_in_progress(rsp) &&
1193 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1194 
1195 		/* They had a few time units to dump stack, so complain. */
1196 		print_other_cpu_stall(rsp);
1197 	}
1198 }
1199 
1200 /**
1201  * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
1202  *
1203  * Set the stall-warning timeout way off into the future, thus preventing
1204  * any RCU CPU stall-warning messages from appearing in the current set of
1205  * RCU grace periods.
1206  *
1207  * The caller must disable hard irqs.
1208  */
1209 void rcu_cpu_stall_reset(void)
1210 {
1211 	struct rcu_state *rsp;
1212 
1213 	for_each_rcu_flavor(rsp)
1214 		ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
1215 }
1216 
1217 /*
1218  * Initialize the specified rcu_data structure's callback list to empty.
1219  */
1220 static void init_callback_list(struct rcu_data *rdp)
1221 {
1222 	int i;
1223 
1224 	if (init_nocb_callback_list(rdp))
1225 		return;
1226 	rdp->nxtlist = NULL;
1227 	for (i = 0; i < RCU_NEXT_SIZE; i++)
1228 		rdp->nxttail[i] = &rdp->nxtlist;
1229 }
1230 
1231 /*
1232  * Determine the value that ->completed will have at the end of the
1233  * next subsequent grace period.  This is used to tag callbacks so that
1234  * a CPU can invoke callbacks in a timely fashion even if that CPU has
1235  * been dyntick-idle for an extended period with callbacks under the
1236  * influence of RCU_FAST_NO_HZ.
1237  *
1238  * The caller must hold rnp->lock with interrupts disabled.
1239  */
1240 static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1241 				       struct rcu_node *rnp)
1242 {
1243 	/*
1244 	 * If RCU is idle, we just wait for the next grace period.
1245 	 * But we can only be sure that RCU is idle if we are looking
1246 	 * at the root rcu_node structure -- otherwise, a new grace
1247 	 * period might have started, but just not yet gotten around
1248 	 * to initializing the current non-root rcu_node structure.
1249 	 */
1250 	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1251 		return rnp->completed + 1;
1252 
1253 	/*
1254 	 * Otherwise, wait for a possible partial grace period and
1255 	 * then the subsequent full grace period.
1256 	 */
1257 	return rnp->completed + 2;
1258 }
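
/*
 * Worked example: if the root rcu_node structure shows ->gpnum ==
 * ->completed == 100, RCU is idle, so callbacks registered now can be
 * tagged with 101 and invoked as soon as grace period 101 completes.
 * Seen from a non-root rcu_node structure, or with a grace period
 * possibly in flight, the safe answer is ->completed + 2.
 */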
1259 
1260 /*
1261  * Trace-event helper function for rcu_start_future_gp() and
1262  * rcu_nocb_wait_gp().
1263  */
1264 static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1265 				unsigned long c, const char *s)
1266 {
1267 	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1268 				      rnp->completed, c, rnp->level,
1269 				      rnp->grplo, rnp->grphi, s);
1270 }
1271 
1272 /*
1273  * Start some future grace period, as needed to handle newly arrived
1274  * callbacks.  The required future grace periods are recorded in each
1275  * rcu_node structure's ->need_future_gp field.  Returns true if there
1276  * is reason to awaken the grace-period kthread.
1277  *
1278  * The caller must hold the specified rcu_node structure's ->lock.
1279  */
1280 static bool __maybe_unused
1281 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1282 		    unsigned long *c_out)
1283 {
1284 	unsigned long c;
1285 	int i;
1286 	bool ret = false;
1287 	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1288 
1289 	/*
1290 	 * Pick up grace-period number for new callbacks.  If this
1291 	 * grace period is already marked as needed, return to the caller.
1292 	 */
1293 	c = rcu_cbs_completed(rdp->rsp, rnp);
1294 	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1295 	if (rnp->need_future_gp[c & 0x1]) {
1296 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1297 		goto out;
1298 	}
1299 
1300 	/*
1301 	 * If either this rcu_node structure or the root rcu_node structure
1302 	 * believe that a grace period is in progress, then we must wait
1303 	 * for the one following, which is in "c".  Because our request
1304 	 * will be noticed at the end of the current grace period, we don't
1305 	 * need to explicitly start one.  We only do the lockless check
1306 	 * of rnp_root's fields if the current rcu_node structure thinks
1307 	 * there is no grace period in flight, and because we hold rnp->lock,
1308 	 * the only possible change is when rnp_root's two fields are
1309 	 * equal, in which case rnp_root->gpnum might be concurrently
1310 	 * incremented.  But that is OK, as it will just result in our
1311 	 * doing some extra useless work.
1312 	 */
1313 	if (rnp->gpnum != rnp->completed ||
1314 	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
1315 		rnp->need_future_gp[c & 0x1]++;
1316 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1317 		goto out;
1318 	}
1319 
1320 	/*
1321 	 * There might be no grace period in progress.  If we don't already
1322 	 * hold it, acquire the root rcu_node structure's lock in order to
1323 	 * start one (if needed).
1324 	 */
1325 	if (rnp != rnp_root) {
1326 		raw_spin_lock(&rnp_root->lock);
1327 		smp_mb__after_unlock_lock();
1328 	}
1329 
1330 	/*
1331 	 * Get a new grace-period number.  If there really is no grace
1332 	 * period in progress, it will be smaller than the one we obtained
1333 	 * earlier.  Adjust callbacks as needed.  Note that even no-CBs
1334 	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1335 	 */
1336 	c = rcu_cbs_completed(rdp->rsp, rnp_root);
1337 	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1338 		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1339 			rdp->nxtcompleted[i] = c;
1340 
1341 	/*
1342 	 * If the need for the required grace period is already
1343 	 * recorded, trace and leave.
1344 	 */
1345 	if (rnp_root->need_future_gp[c & 0x1]) {
1346 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1347 		goto unlock_out;
1348 	}
1349 
1350 	/* Record the need for the future grace period. */
1351 	rnp_root->need_future_gp[c & 0x1]++;
1352 
1353 	/* If a grace period is not already in progress, start one. */
1354 	if (rnp_root->gpnum != rnp_root->completed) {
1355 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1356 	} else {
1357 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1358 		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1359 	}
1360 unlock_out:
1361 	if (rnp != rnp_root)
1362 		raw_spin_unlock(&rnp_root->lock);
1363 out:
1364 	if (c_out != NULL)
1365 		*c_out = c;
1366 	return ret;
1367 }
1368 
1369 /*
1370  * Clean up any old requests for the just-ended grace period.  Also return
1371  * whether any additional grace periods have been requested.  Also invoke
1372  * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1373  * waiting for this grace period to complete.
1374  */
1375 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1376 {
1377 	int c = rnp->completed;
1378 	int needmore;
1379 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1380 
1381 	rcu_nocb_gp_cleanup(rsp, rnp);
1382 	rnp->need_future_gp[c & 0x1] = 0;
1383 	needmore = rnp->need_future_gp[(c + 1) & 0x1];
1384 	trace_rcu_future_gp(rnp, rdp, c,
1385 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1386 	return needmore;
1387 }
1388 
1389 /*
1390  * Awaken the grace-period kthread for the specified flavor of RCU.
1391  * Don't do a self-awaken, and don't bother awakening when there is
1392  * nothing for the grace-period kthread to do (as in several CPUs
1393  * raced to awaken, and we lost), and finally don't try to awaken
1394  * a kthread that has not yet been created.
1395  */
1396 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1397 {
1398 	if (current == rsp->gp_kthread ||
1399 	    !ACCESS_ONCE(rsp->gp_flags) ||
1400 	    !rsp->gp_kthread)
1401 		return;
1402 	wake_up(&rsp->gp_wq);
1403 }
1404 
1405 /*
1406  * If there is room, assign a ->completed number to any callbacks on
1407  * this CPU that have not already been assigned.  Also accelerate any
1408  * callbacks that were previously assigned a ->completed number that has
1409  * since proven to be too conservative, which can happen if callbacks get
1410  * assigned a ->completed number while RCU is idle, but with reference to
1411  * a non-root rcu_node structure.  This function is idempotent, so it does
1412  * not hurt to call it repeatedly.  Returns a flag saying that we should
1413  * awaken the RCU grace-period kthread.
1414  *
1415  * The caller must hold rnp->lock with interrupts disabled.
1416  */
1417 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1418 			       struct rcu_data *rdp)
1419 {
1420 	unsigned long c;
1421 	int i;
1422 	bool ret;
1423 
1424 	/* If the CPU has no callbacks, nothing to do. */
1425 	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1426 		return false;
1427 
1428 	/*
1429 	 * Starting from the sublist containing the callbacks most
1430 	 * recently assigned a ->completed number and working down, find the
1431 	 * first sublist that is not assignable to an upcoming grace period.
1432 	 * Such a sublist has something in it (first two tests) and has
1433 	 * a ->completed number assigned that will complete sooner than
1434 	 * the ->completed number for newly arrived callbacks (last test).
1435 	 *
1436 	 * The key point is that any later sublist can be assigned the
1437 	 * same ->completed number as the newly arrived callbacks, which
1438 	 * means that the callbacks in any of these later sublists can be
1439 	 * grouped into a single sublist, whether or not they have already
1440 	 * been assigned a ->completed number.
1441 	 */
1442 	c = rcu_cbs_completed(rsp, rnp);
1443 	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1444 		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1445 		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1446 			break;
1447 
1448 	/*
1449 	 * If there is no sublist for unassigned callbacks, leave.
1450 	 * At the same time, advance "i" one sublist, so that "i" will
1451 	 * index into the sublist into which all the remaining callbacks
1452 	 * should be grouped.
1453 	 */
1454 	if (++i >= RCU_NEXT_TAIL)
1455 		return false;
1456 
1457 	/*
1458 	 * Assign all subsequent callbacks' ->completed number to the next
1459 	 * full grace period and group them all in the sublist initially
1460 	 * indexed by "i".
1461 	 */
1462 	for (; i <= RCU_NEXT_TAIL; i++) {
1463 		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1464 		rdp->nxtcompleted[i] = c;
1465 	}
1466 	/* Record any needed additional grace periods. */
1467 	ret = rcu_start_future_gp(rnp, rdp, NULL);
1468 
1469 	/* Trace depending on how much we were able to accelerate. */
1470 	if (!*rdp->nxttail[RCU_WAIT_TAIL])
1471 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1472 	else
1473 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1474 	return ret;
1475 }
1476 
1477 /*
1478  * Move any callbacks whose grace period has completed to the
1479  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1480  * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1481  * sublist.  This function is idempotent, so it does not hurt to
1482  * invoke it repeatedly.  As long as it is not invoked -too- often...
1483  * Returns true if the RCU grace-period kthread needs to be awakened.
1484  *
1485  * The caller must hold rnp->lock with interrupts disabled.
1486  */
1487 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1488 			    struct rcu_data *rdp)
1489 {
1490 	int i, j;
1491 
1492 	/* If the CPU has no callbacks, nothing to do. */
1493 	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1494 		return false;
1495 
1496 	/*
1497 	 * Find all callbacks whose ->completed numbers indicate that they
1498 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1499 	 */
1500 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1501 		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1502 			break;
1503 		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1504 	}
1505 	/* Clean up any sublist tail pointers that were misordered above. */
1506 	for (j = RCU_WAIT_TAIL; j < i; j++)
1507 		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1508 
1509 	/* Copy down callbacks to fill in empty sublists. */
1510 	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1511 		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1512 			break;
1513 		rdp->nxttail[j] = rdp->nxttail[i];
1514 		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1515 	}
1516 
1517 	/* Classify any remaining callbacks. */
1518 	return rcu_accelerate_cbs(rsp, rnp, rdp);
1519 }
1520 
1521 /*
1522  * Update CPU-local rcu_data state to record the beginnings and ends of
1523  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1524  * structure corresponding to the current CPU, and must have irqs disabled.
1525  * Returns true if the grace-period kthread needs to be awakened.
1526  */
1527 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1528 			      struct rcu_data *rdp)
1529 {
1530 	bool ret;
1531 
1532 	/* Handle the ends of any preceding grace periods first. */
1533 	if (rdp->completed == rnp->completed) {
1534 
1535 		/* No grace period end, so just accelerate recent callbacks. */
1536 		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1537 
1538 	} else {
1539 
1540 		/* Advance callbacks. */
1541 		ret = rcu_advance_cbs(rsp, rnp, rdp);
1542 
1543 		/* Remember that we saw this grace-period completion. */
1544 		rdp->completed = rnp->completed;
1545 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1546 	}
1547 
1548 	if (rdp->gpnum != rnp->gpnum) {
1549 		/*
1550 		 * If the current grace period is waiting for this CPU,
1551 		 * set up to detect a quiescent state, otherwise don't
1552 		 * go looking for one.
1553 		 */
1554 		rdp->gpnum = rnp->gpnum;
1555 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1556 		rdp->passed_quiesce = 0;
1557 		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1558 		zero_cpu_stall_ticks(rdp);
1559 	}
1560 	return ret;
1561 }
1562 
1563 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1564 {
1565 	unsigned long flags;
1566 	bool needwake;
1567 	struct rcu_node *rnp;
1568 
1569 	local_irq_save(flags);
1570 	rnp = rdp->mynode;
1571 	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1572 	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
1573 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1574 		local_irq_restore(flags);
1575 		return;
1576 	}
1577 	smp_mb__after_unlock_lock();
1578 	needwake = __note_gp_changes(rsp, rnp, rdp);
1579 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1580 	if (needwake)
1581 		rcu_gp_kthread_wake(rsp);
1582 }
1583 
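/*
 * Illustrative sketch of the lock-avoidance pattern used by
 * note_gp_changes() above (hypothetical types and names, not part of
 * this file): sample the shared generation number without the lock,
 * and take the lock only via trylock when an update might be needed,
 * otherwise defer the update to a later invocation.
 *
 *	struct example_shared {
 *		raw_spinlock_t lock;
 *		unsigned long gen;
 *	};
 *
 *	struct example_local {
 *		unsigned long seen;
 *	};
 *
 *	static void example_note_changes(struct example_shared *sh,
 *					 struct example_local *lc)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		if (lc->seen == ACCESS_ONCE(sh->gen) ||
 *		    !raw_spin_trylock(&sh->lock)) {
 *			local_irq_restore(flags);
 *			return;
 *		}
 *		lc->seen = sh->gen;
 *		raw_spin_unlock_irqrestore(&sh->lock, flags);
 *	}
 */
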
1584 /*
1585  * Initialize a new grace period.  Return 0 if no grace period required.
1586  */
1587 static int rcu_gp_init(struct rcu_state *rsp)
1588 {
1589 	struct rcu_data *rdp;
1590 	struct rcu_node *rnp = rcu_get_root(rsp);
1591 
1592 	rcu_bind_gp_kthread();
1593 	raw_spin_lock_irq(&rnp->lock);
1594 	smp_mb__after_unlock_lock();
1595 	if (!ACCESS_ONCE(rsp->gp_flags)) {
1596 		/* Spurious wakeup, tell caller to go back to sleep.  */
1597 		raw_spin_unlock_irq(&rnp->lock);
1598 		return 0;
1599 	}
1600 	ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
1601 
1602 	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1603 		/*
1604 		 * Grace period already in progress, don't start another.
1605 		 * Not supposed to be able to happen.
1606 		 */
1607 		raw_spin_unlock_irq(&rnp->lock);
1608 		return 0;
1609 	}
1610 
1611 	/* Advance to a new grace period and initialize state. */
1612 	record_gp_stall_check_time(rsp);
1613 	/* Record GP times before starting GP, hence smp_store_release(). */
1614 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1615 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1616 	raw_spin_unlock_irq(&rnp->lock);
1617 
1618 	/* Exclude any concurrent CPU-hotplug operations. */
1619 	mutex_lock(&rsp->onoff_mutex);
1620 	smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
1621 
1622 	/*
1623 	 * Set the quiescent-state-needed bits in all the rcu_node
1624 	 * structures for all currently online CPUs in breadth-first order,
1625 	 * starting from the root rcu_node structure, relying on the layout
1626 	 * of the tree within the rsp->node[] array.  Note that other CPUs
1627 	 * will access only the leaves of the hierarchy, thus seeing that no
1628 	 * grace period is in progress, at least until the corresponding
1629 	 * leaf node has been initialized.  In addition, we have excluded
1630 	 * CPU-hotplug operations.
1631 	 *
1632 	 * The grace period cannot complete until the initialization
1633 	 * process finishes, because this kthread handles both.
1634 	 */
1635 	rcu_for_each_node_breadth_first(rsp, rnp) {
1636 		raw_spin_lock_irq(&rnp->lock);
1637 		smp_mb__after_unlock_lock();
1638 		rdp = this_cpu_ptr(rsp->rda);
1639 		rcu_preempt_check_blocked_tasks(rnp);
1640 		rnp->qsmask = rnp->qsmaskinit;
1641 		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
1642 		WARN_ON_ONCE(rnp->completed != rsp->completed);
1643 		ACCESS_ONCE(rnp->completed) = rsp->completed;
1644 		if (rnp == rdp->mynode)
1645 			(void)__note_gp_changes(rsp, rnp, rdp);
1646 		rcu_preempt_boost_start_gp(rnp);
1647 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1648 					    rnp->level, rnp->grplo,
1649 					    rnp->grphi, rnp->qsmask);
1650 		raw_spin_unlock_irq(&rnp->lock);
1651 		cond_resched_rcu_qs();
1652 	}
1653 
1654 	mutex_unlock(&rsp->onoff_mutex);
1655 	return 1;
1656 }
1657 
1658 /*
1659  * Do one round of quiescent-state forcing.
1660  */
1661 static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1662 {
1663 	int fqs_state = fqs_state_in;
1664 	bool isidle = false;
1665 	unsigned long maxj;
1666 	struct rcu_node *rnp = rcu_get_root(rsp);
1667 
1668 	rsp->n_force_qs++;
1669 	if (fqs_state == RCU_SAVE_DYNTICK) {
1670 		/* Collect dyntick-idle snapshots. */
1671 		if (is_sysidle_rcu_state(rsp)) {
1672 			isidle = true;
1673 			maxj = jiffies - ULONG_MAX / 4;
1674 		}
1675 		force_qs_rnp(rsp, dyntick_save_progress_counter,
1676 			     &isidle, &maxj);
1677 		rcu_sysidle_report_gp(rsp, isidle, maxj);
1678 		fqs_state = RCU_FORCE_QS;
1679 	} else {
1680 		/* Handle dyntick-idle and offline CPUs. */
1681 		isidle = false;
1682 		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
1683 	}
1684 	/* Clear flag to prevent immediate re-entry. */
1685 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
1686 		raw_spin_lock_irq(&rnp->lock);
1687 		smp_mb__after_unlock_lock();
1688 		ACCESS_ONCE(rsp->gp_flags) =
1689 			ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
1690 		raw_spin_unlock_irq(&rnp->lock);
1691 	}
1692 	return fqs_state;
1693 }
1694 
1695 /*
1696  * Clean up after the old grace period.
1697  */
1698 static void rcu_gp_cleanup(struct rcu_state *rsp)
1699 {
1700 	unsigned long gp_duration;
1701 	bool needgp = false;
1702 	int nocb = 0;
1703 	struct rcu_data *rdp;
1704 	struct rcu_node *rnp = rcu_get_root(rsp);
1705 
1706 	raw_spin_lock_irq(&rnp->lock);
1707 	smp_mb__after_unlock_lock();
1708 	gp_duration = jiffies - rsp->gp_start;
1709 	if (gp_duration > rsp->gp_max)
1710 		rsp->gp_max = gp_duration;
1711 
1712 	/*
1713 	 * We know the grace period is complete, but to everyone else
1714 	 * it appears to still be ongoing.  But it is also the case
1715 	 * that to everyone else it looks like there is nothing that
1716 	 * they can do to advance the grace period.  It is therefore
1717 	 * safe for us to drop the lock in order to mark the grace
1718 	 * period as completed in all of the rcu_node structures.
1719 	 */
1720 	raw_spin_unlock_irq(&rnp->lock);
1721 
1722 	/*
1723 	 * Propagate new ->completed value to rcu_node structures so
1724 	 * that other CPUs don't have to wait until the start of the next
1725 	 * grace period to process their callbacks.  This also avoids
1726 	 * some nasty RCU grace-period initialization races by forcing
1727 	 * the end of the current grace period to be completely recorded in
1728 	 * all of the rcu_node structures before the beginning of the next
1729 	 * grace period is recorded in any of the rcu_node structures.
1730 	 */
1731 	rcu_for_each_node_breadth_first(rsp, rnp) {
1732 		raw_spin_lock_irq(&rnp->lock);
1733 		smp_mb__after_unlock_lock();
1734 		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
1735 		rdp = this_cpu_ptr(rsp->rda);
1736 		if (rnp == rdp->mynode)
1737 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
1738 		/* smp_mb() provided by prior unlock-lock pair. */
1739 		nocb += rcu_future_gp_cleanup(rsp, rnp);
1740 		raw_spin_unlock_irq(&rnp->lock);
1741 		cond_resched_rcu_qs();
1742 	}
1743 	rnp = rcu_get_root(rsp);
1744 	raw_spin_lock_irq(&rnp->lock);
1745 	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
1746 	rcu_nocb_gp_set(rnp, nocb);
1747 
1748 	/* Declare grace period done. */
1749 	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
1750 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
1751 	rsp->fqs_state = RCU_GP_IDLE;
1752 	rdp = this_cpu_ptr(rsp->rda);
1753 	/* Advance CBs to reduce false positives below. */
1754 	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
1755 	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
1756 		ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
1757 		trace_rcu_grace_period(rsp->name,
1758 				       ACCESS_ONCE(rsp->gpnum),
1759 				       TPS("newreq"));
1760 	}
1761 	raw_spin_unlock_irq(&rnp->lock);
1762 }
1763 
1764 /*
1765  * Body of kthread that handles grace periods.
1766  */
1767 static int __noreturn rcu_gp_kthread(void *arg)
1768 {
1769 	int fqs_state;
1770 	int gf;
1771 	unsigned long j;
1772 	int ret;
1773 	struct rcu_state *rsp = arg;
1774 	struct rcu_node *rnp = rcu_get_root(rsp);
1775 
1776 	for (;;) {
1777 
1778 		/* Handle grace-period start. */
1779 		for (;;) {
1780 			trace_rcu_grace_period(rsp->name,
1781 					       ACCESS_ONCE(rsp->gpnum),
1782 					       TPS("reqwait"));
1783 			rsp->gp_state = RCU_GP_WAIT_GPS;
1784 			wait_event_interruptible(rsp->gp_wq,
1785 						 ACCESS_ONCE(rsp->gp_flags) &
1786 						 RCU_GP_FLAG_INIT);
1787 			/* Locking provides needed memory barrier. */
1788 			if (rcu_gp_init(rsp))
1789 				break;
1790 			cond_resched_rcu_qs();
1791 			WARN_ON(signal_pending(current));
1792 			trace_rcu_grace_period(rsp->name,
1793 					       ACCESS_ONCE(rsp->gpnum),
1794 					       TPS("reqwaitsig"));
1795 		}
1796 
1797 		/* Handle quiescent-state forcing. */
1798 		fqs_state = RCU_SAVE_DYNTICK;
1799 		j = jiffies_till_first_fqs;
1800 		if (j > HZ) {
1801 			j = HZ;
1802 			jiffies_till_first_fqs = HZ;
1803 		}
1804 		ret = 0;
1805 		for (;;) {
1806 			if (!ret)
1807 				rsp->jiffies_force_qs = jiffies + j;
1808 			trace_rcu_grace_period(rsp->name,
1809 					       ACCESS_ONCE(rsp->gpnum),
1810 					       TPS("fqswait"));
1811 			rsp->gp_state = RCU_GP_WAIT_FQS;
1812 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
1813 					((gf = ACCESS_ONCE(rsp->gp_flags)) &
1814 					 RCU_GP_FLAG_FQS) ||
1815 					(!ACCESS_ONCE(rnp->qsmask) &&
1816 					 !rcu_preempt_blocked_readers_cgp(rnp)),
1817 					j);
1818 			/* Locking provides needed memory barriers. */
1819 			/* If grace period done, leave loop. */
1820 			if (!ACCESS_ONCE(rnp->qsmask) &&
1821 			    !rcu_preempt_blocked_readers_cgp(rnp))
1822 				break;
1823 			/* If time for quiescent-state forcing, do it. */
1824 			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
1825 			    (gf & RCU_GP_FLAG_FQS)) {
1826 				trace_rcu_grace_period(rsp->name,
1827 						       ACCESS_ONCE(rsp->gpnum),
1828 						       TPS("fqsstart"));
1829 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
1830 				trace_rcu_grace_period(rsp->name,
1831 						       ACCESS_ONCE(rsp->gpnum),
1832 						       TPS("fqsend"));
1833 				cond_resched_rcu_qs();
1834 			} else {
1835 				/* Deal with stray signal. */
1836 				cond_resched_rcu_qs();
1837 				WARN_ON(signal_pending(current));
1838 				trace_rcu_grace_period(rsp->name,
1839 						       ACCESS_ONCE(rsp->gpnum),
1840 						       TPS("fqswaitsig"));
1841 			}
1842 			j = jiffies_till_next_fqs;
1843 			if (j > HZ) {
1844 				j = HZ;
1845 				jiffies_till_next_fqs = HZ;
1846 			} else if (j < 1) {
1847 				j = 1;
1848 				jiffies_till_next_fqs = 1;
1849 			}
1850 		}
1851 
1852 		/* Handle grace-period end. */
1853 		rcu_gp_cleanup(rsp);
1854 	}
1855 }
1856 
1857 /*
1858  * Start a new RCU grace period if warranted, re-initializing the hierarchy
1859  * in preparation for detecting the next grace period.  The caller must hold
1860  * the root node's ->lock and hard irqs must be disabled.
1861  *
1862  * Note that it is legal for a dying CPU (which is marked as offline) to
1863  * invoke this function.  This can happen when the dying CPU reports its
1864  * quiescent state.
1865  *
1866  * Returns true if the grace-period kthread must be awakened.
1867  */
1868 static bool
1869 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
1870 		      struct rcu_data *rdp)
1871 {
1872 	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
1873 		/*
1874 		 * Either we have not yet spawned the grace-period
1875 		 * task, this CPU does not need another grace period,
1876 		 * or a grace period is already in progress.
1877 		 * Either way, don't start a new grace period.
1878 		 */
1879 		return false;
1880 	}
1881 	ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
1882 	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
1883 			       TPS("newreq"));
1884 
1885 	/*
1886 	 * We can't do wakeups while holding the rnp->lock, as that
1887 	 * could cause possible deadlocks with the rq->lock. Defer
1888 	 * the wakeup to our caller.
1889 	 */
1890 	return true;
1891 }
1892 
1893 /*
1894  * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
1895  * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
1896  * is invoked indirectly from rcu_advance_cbs(), which would result in
1897  * endless recursion -- or would do so if it wasn't for the self-deadlock
1898  * that is encountered beforehand.
1899  *
1900  * Returns true if the grace-period kthread needs to be awakened.
1901  */
1902 static bool rcu_start_gp(struct rcu_state *rsp)
1903 {
1904 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1905 	struct rcu_node *rnp = rcu_get_root(rsp);
1906 	bool ret = false;
1907 
1908 	/*
1909 	 * If there is no grace period in progress right now, any
1910 	 * callbacks we have up to this point will be satisfied by the
1911 	 * next grace period.  Also, advancing the callbacks reduces the
1912 	 * probability of false positives from cpu_needs_another_gp()
1913 	 * resulting in pointless grace periods.  So, advance callbacks
1914 	 * then start the grace period!
1915 	 */
1916 	ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
1917 	ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
1918 	return ret;
1919 }
1920 
1921 /*
1922  * Report a full set of quiescent states to the specified rcu_state
1923  * data structure.  This involves cleaning up after the prior grace
1924  * period and letting rcu_start_gp() start up the next grace period
1925  * if one is needed.  Note that the caller must hold rnp->lock, which
1926  * is released before return.
1927  */
1928 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
1929 	__releases(rcu_get_root(rsp)->lock)
1930 {
1931 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
1932 	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
1933 	rcu_gp_kthread_wake(rsp);
1934 }
1935 
1936 /*
1937  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1938  * Allows quiescent states for a group of CPUs to be reported at one go
1939  * to the specified rcu_node structure, though all the CPUs in the group
1940  * must be represented by the same rcu_node structure (which need not be
1941  * a leaf rcu_node structure, though it often will be).  That structure's
1942  * lock must be held upon entry, and it is released before return.
1943  */
1944 static void
1945 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1946 		  struct rcu_node *rnp, unsigned long flags)
1947 	__releases(rnp->lock)
1948 {
1949 	struct rcu_node *rnp_c;
1950 
1951 	/* Walk up the rcu_node hierarchy. */
1952 	for (;;) {
1953 		if (!(rnp->qsmask & mask)) {
1954 
1955 			/* Our bit has already been cleared, so done. */
1956 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
1957 			return;
1958 		}
1959 		rnp->qsmask &= ~mask;
1960 		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
1961 						 mask, rnp->qsmask, rnp->level,
1962 						 rnp->grplo, rnp->grphi,
1963 						 !!rnp->gp_tasks);
1964 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1965 
1966 			/* Other bits still set at this level, so done. */
1967 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
1968 			return;
1969 		}
1970 		mask = rnp->grpmask;
1971 		if (rnp->parent == NULL) {
1972 
1973 			/* No more levels.  Exit loop holding root lock. */
1974 
1975 			break;
1976 		}
1977 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1978 		rnp_c = rnp;
1979 		rnp = rnp->parent;
1980 		raw_spin_lock_irqsave(&rnp->lock, flags);
1981 		smp_mb__after_unlock_lock();
1982 		WARN_ON_ONCE(rnp_c->qsmask);
1983 	}
1984 
1985 	/*
1986 	 * Get here if we are the last CPU to pass through a quiescent
1987 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1988 	 * to clean up and start the next grace period if one is needed.
1989 	 */
1990 	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
1991 }
1992 
1993 /*
1994  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1995  * structure.  This must be either called from the specified CPU, or
1996  * called when the specified CPU is known to be offline (and when it is
1997  * also known that no other CPU is concurrently trying to help the offline
1998  * CPU).  The grace-period numbers are compared to make sure we are still in
1999  * the grace period of interest.  We don't want to end the current grace period
2000  * based on quiescent states detected in an earlier grace period!
2001  */
2002 static void
2003 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2004 {
2005 	unsigned long flags;
2006 	unsigned long mask;
2007 	bool needwake;
2008 	struct rcu_node *rnp;
2009 
2010 	rnp = rdp->mynode;
2011 	raw_spin_lock_irqsave(&rnp->lock, flags);
2012 	smp_mb__after_unlock_lock();
2013 	if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
2014 	    rnp->completed == rnp->gpnum) {
2015 
2016 		/*
2017 		 * The grace period in which this quiescent state was
2018 		 * recorded has ended, so don't report it upwards.
2019 		 * We will instead need a new quiescent state that lies
2020 		 * within the current grace period.
2021 		 */
2022 		rdp->passed_quiesce = 0;	/* need qs for new gp. */
2023 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2024 		return;
2025 	}
2026 	mask = rdp->grpmask;
2027 	if ((rnp->qsmask & mask) == 0) {
2028 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2029 	} else {
2030 		rdp->qs_pending = 0;
2031 
2032 		/*
2033 		 * This GP can't end until this CPU checks in, so all of our
2034 		 * callbacks can be processed during the next GP.
2035 		 */
2036 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2037 
2038 		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
2039 		if (needwake)
2040 			rcu_gp_kthread_wake(rsp);
2041 	}
2042 }
2043 
2044 /*
2045  * Check to see if there is a new grace period of which this CPU
2046  * is not yet aware, and if so, set up local rcu_data state for it.
2047  * Otherwise, see if this CPU has just passed through its first
2048  * quiescent state for this grace period, and record that fact if so.
2049  */
2050 static void
2051 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2052 {
2053 	/* Check for grace-period ends and beginnings. */
2054 	note_gp_changes(rsp, rdp);
2055 
2056 	/*
2057 	 * Does this CPU still need to do its part for current grace period?
2058 	 * If no, return and let the other CPUs do their part as well.
2059 	 */
2060 	if (!rdp->qs_pending)
2061 		return;
2062 
2063 	/*
2064 	 * Was there a quiescent state since the beginning of the grace
2065 	 * period? If no, then exit and wait for the next call.
2066 	 */
2067 	if (!rdp->passed_quiesce)
2068 		return;
2069 
2070 	/*
2071 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2072 	 * judge of that).
2073 	 */
2074 	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2075 }
2076 
2077 #ifdef CONFIG_HOTPLUG_CPU
2078 
2079 /*
2080  * Send the specified CPU's RCU callbacks to the orphanage.  The
2081  * specified CPU must be offline, and the caller must hold the
2082  * ->orphan_lock.
2083  */
2084 static void
2085 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2086 			  struct rcu_node *rnp, struct rcu_data *rdp)
2087 {
2088 	/* No-CBs CPUs do not have orphanable callbacks. */
2089 	if (rcu_is_nocb_cpu(rdp->cpu))
2090 		return;
2091 
2092 	/*
2093 	 * Orphan the callbacks.  First adjust the counts.  This is safe
2094 	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
2095 	 * cannot be running now.  Thus no memory barrier is required.
2096 	 */
2097 	if (rdp->nxtlist != NULL) {
2098 		rsp->qlen_lazy += rdp->qlen_lazy;
2099 		rsp->qlen += rdp->qlen;
2100 		rdp->n_cbs_orphaned += rdp->qlen;
2101 		rdp->qlen_lazy = 0;
2102 		ACCESS_ONCE(rdp->qlen) = 0;
2103 	}
2104 
2105 	/*
2106 	 * Next, move those callbacks still needing a grace period to
2107 	 * the orphanage, where some other CPU will pick them up.
2108 	 * Some of the callbacks might have gone partway through a grace
2109 	 * period, but that is too bad.  They get to start over because we
2110 	 * cannot assume that grace periods are synchronized across CPUs.
2111 	 * We don't bother updating the ->nxttail[] array yet; instead,
2112 	 * we just reset the whole thing later on.
2113 	 */
2114 	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
2115 		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
2116 		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
2117 		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2118 	}
2119 
2120 	/*
2121 	 * Then move the ready-to-invoke callbacks to the orphanage,
2122 	 * where some other CPU will pick them up.  These will not be
2123 	 * required to pass through another grace period: They are done.
2124 	 */
2125 	if (rdp->nxtlist != NULL) {
2126 		*rsp->orphan_donetail = rdp->nxtlist;
2127 		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
2128 	}
2129 
2130 	/* Finally, initialize the rcu_data structure's list to empty.  */
2131 	init_callback_list(rdp);
2132 }
2133 
2134 /*
2135  * Adopt the RCU callbacks from the specified rcu_state structure's
2136  * orphanage.  The caller must hold the ->orphan_lock.
2137  */
2138 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
2139 {
2140 	int i;
2141 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2142 
2143 	/* No-CBs CPUs are handled specially. */
2144 	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
2145 		return;
2146 
2147 	/* Do the accounting first. */
2148 	rdp->qlen_lazy += rsp->qlen_lazy;
2149 	rdp->qlen += rsp->qlen;
2150 	rdp->n_cbs_adopted += rsp->qlen;
2151 	if (rsp->qlen_lazy != rsp->qlen)
2152 		rcu_idle_count_callbacks_posted();
2153 	rsp->qlen_lazy = 0;
2154 	rsp->qlen = 0;
2155 
2156 	/*
2157 	 * We do not need a memory barrier here because the only way we
2158 	 * can get here if there is an rcu_barrier() in flight is if
2159 	 * we are the task doing the rcu_barrier().
2160 	 */
2161 
2162 	/* First adopt the ready-to-invoke callbacks. */
2163 	if (rsp->orphan_donelist != NULL) {
2164 		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
2165 		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
2166 		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
2167 			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2168 				rdp->nxttail[i] = rsp->orphan_donetail;
2169 		rsp->orphan_donelist = NULL;
2170 		rsp->orphan_donetail = &rsp->orphan_donelist;
2171 	}
2172 
2173 	/* And then adopt the callbacks that still need a grace period. */
2174 	if (rsp->orphan_nxtlist != NULL) {
2175 		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
2176 		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
2177 		rsp->orphan_nxtlist = NULL;
2178 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2179 	}
2180 }
2181 
2182 /*
2183  * Trace the fact that this CPU is going offline.
2184  */
2185 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2186 {
2187 	RCU_TRACE(unsigned long mask);
2188 	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2189 	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2190 
2191 	RCU_TRACE(mask = rdp->grpmask);
2192 	trace_rcu_grace_period(rsp->name,
2193 			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2194 			       TPS("cpuofl"));
2195 }
2196 
2197 /*
2198  * The CPU has been completely removed, and some other CPU is reporting
2199  * this fact from process context.  Do the remainder of the cleanup,
2200  * including orphaning the outgoing CPU's RCU callbacks, and also
2201  * adopting them.  There can only be one CPU hotplug operation at a time,
2202  * so no other CPU can be attempting to update rcu_cpu_kthread_task.
2203  */
2204 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2205 {
2206 	unsigned long flags;
2207 	unsigned long mask;
2208 	int need_report = 0;
2209 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2210 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2211 
2212 	/* Adjust any no-longer-needed kthreads. */
2213 	rcu_boost_kthread_setaffinity(rnp, -1);
2214 
2215 	/* Exclude any attempts to start a new grace period. */
2216 	mutex_lock(&rsp->onoff_mutex);
2217 	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2218 
2219 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2220 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2221 	rcu_adopt_orphan_cbs(rsp, flags);
2222 
2223 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
2224 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
2225 	do {
2226 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
2227 		smp_mb__after_unlock_lock();
2228 		rnp->qsmaskinit &= ~mask;
2229 		if (rnp->qsmaskinit != 0) {
2230 			if (rnp != rdp->mynode)
2231 				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2232 			break;
2233 		}
2234 		if (rnp == rdp->mynode)
2235 			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2236 		else
2237 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2238 		mask = rnp->grpmask;
2239 		rnp = rnp->parent;
2240 	} while (rnp != NULL);
2241 
2242 	/*
2243 	 * We still hold the leaf rcu_node structure lock here, and
2244 	 * irqs are still disabled.  The reason for this subterfuge is
2245 	 * that invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2246 	 * held leads to deadlock.
2247 	 */
2248 	raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2249 	rnp = rdp->mynode;
2250 	if (need_report & RCU_OFL_TASKS_NORM_GP)
2251 		rcu_report_unblock_qs_rnp(rnp, flags);
2252 	else
2253 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2254 	if (need_report & RCU_OFL_TASKS_EXP_GP)
2255 		rcu_report_exp_rnp(rsp, rnp, true);
2256 	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2257 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2258 		  cpu, rdp->qlen, rdp->nxtlist);
2259 	init_callback_list(rdp);
2260 	/* Disallow further callbacks on this CPU. */
2261 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2262 	mutex_unlock(&rsp->onoff_mutex);
2263 }
2264 
2265 #else /* #ifdef CONFIG_HOTPLUG_CPU */
2266 
2267 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2268 {
2269 }
2270 
2271 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2272 {
2273 }
2274 
2275 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
2276 
2277 /*
2278  * Invoke any RCU callbacks that have made it to the end of their grace
2279  * period.  Throttle as specified by rdp->blimit.
2280  */
2281 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2282 {
2283 	unsigned long flags;
2284 	struct rcu_head *next, *list, **tail;
2285 	long bl, count, count_lazy;
2286 	int i;
2287 
2288 	/* If no callbacks are ready, just return. */
2289 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
2290 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
2291 		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
2292 				    need_resched(), is_idle_task(current),
2293 				    rcu_is_callbacks_kthread());
2294 		return;
2295 	}
2296 
2297 	/*
2298 	 * Extract the list of ready callbacks, disabling interrupts to prevent
2299 	 * races with call_rcu() from interrupt handlers.
2300 	 */
2301 	local_irq_save(flags);
2302 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2303 	bl = rdp->blimit;
2304 	trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
2305 	list = rdp->nxtlist;
2306 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2307 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2308 	tail = rdp->nxttail[RCU_DONE_TAIL];
2309 	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2310 		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2311 			rdp->nxttail[i] = &rdp->nxtlist;
2312 	local_irq_restore(flags);
2313 
2314 	/* Invoke callbacks. */
2315 	count = count_lazy = 0;
2316 	while (list) {
2317 		next = list->next;
2318 		prefetch(next);
2319 		debug_rcu_head_unqueue(list);
2320 		if (__rcu_reclaim(rsp->name, list))
2321 			count_lazy++;
2322 		list = next;
2323 		/* Stop only if limit reached and CPU has something to do. */
2324 		if (++count >= bl &&
2325 		    (need_resched() ||
2326 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2327 			break;
2328 	}
2329 
2330 	local_irq_save(flags);
2331 	trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2332 			    is_idle_task(current),
2333 			    rcu_is_callbacks_kthread());
2334 
2335 	/* Update count, and requeue any remaining callbacks. */
2336 	if (list != NULL) {
2337 		*tail = rdp->nxtlist;
2338 		rdp->nxtlist = list;
2339 		for (i = 0; i < RCU_NEXT_SIZE; i++)
2340 			if (&rdp->nxtlist == rdp->nxttail[i])
2341 				rdp->nxttail[i] = tail;
2342 			else
2343 				break;
2344 	}
2345 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2346 	rdp->qlen_lazy -= count_lazy;
2347 	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
2348 	rdp->n_cbs_invoked += count;
2349 
2350 	/* Reinstate batch limit if we have worked down the excess. */
2351 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2352 		rdp->blimit = blimit;
2353 
2354 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2355 	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2356 		rdp->qlen_last_fqs_check = 0;
2357 		rdp->n_force_qs_snap = rsp->n_force_qs;
2358 	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2359 		rdp->qlen_last_fqs_check = rdp->qlen;
2360 	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
2361 
2362 	local_irq_restore(flags);
2363 
2364 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2365 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2366 		invoke_rcu_core();
2367 }
2368 
2369 /*
2370  * Check to see if this CPU is in a non-context-switch quiescent state
2371  * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2372  * Also schedule RCU core processing.
2373  *
2374  * This function must be called from hardirq context.  It is normally
2375  * invoked from the scheduling-clock interrupt.  It uses rcu_pending() to
2376  * decide whether RCU core processing needs to be scheduled.
2377  */
2378 void rcu_check_callbacks(int user)
2379 {
2380 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2381 	increment_cpu_stall_ticks();
2382 	if (user || rcu_is_cpu_rrupt_from_idle()) {
2383 
2384 		/*
2385 		 * Get here if this CPU took its interrupt from user
2386 		 * mode or from the idle loop, and if this is not a
2387 		 * nested interrupt.  In this case, the CPU is in
2388 		 * a quiescent state, so note it.
2389 		 *
2390 		 * No memory barrier is required here because both
2391 		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2392 		 * variables that other CPUs neither access nor modify,
2393 		 * at least not while the corresponding CPU is online.
2394 		 */
2395 
2396 		rcu_sched_qs();
2397 		rcu_bh_qs();
2398 
2399 	} else if (!in_softirq()) {
2400 
2401 		/*
2402 		 * Get here if this CPU did not take its interrupt from
2403 		 * softirq, in other words, if it is not interrupting
2404 		 * an rcu_bh read-side critical section.  This is therefore
2405 		 * a quiescent state for rcu_bh, so note it.
2406 		 */
2407 
2408 		rcu_bh_qs();
2409 	}
2410 	rcu_preempt_check_callbacks();
2411 	if (rcu_pending())
2412 		invoke_rcu_core();
2413 	if (user)
2414 		rcu_note_voluntary_context_switch(current);
2415 	trace_rcu_utilization(TPS("End scheduler-tick"));
2416 }
2417 
2418 /*
2419  * Scan the leaf rcu_node structures, processing dyntick state for any that
2420  * have not yet encountered a quiescent state, using the function specified.
2421  * Also initiate boosting for any threads blocked on the root rcu_node.
2422  *
2423  * The caller must have suppressed start of new grace periods.
2424  */
2425 static void force_qs_rnp(struct rcu_state *rsp,
2426 			 int (*f)(struct rcu_data *rdp, bool *isidle,
2427 				  unsigned long *maxj),
2428 			 bool *isidle, unsigned long *maxj)
2429 {
2430 	unsigned long bit;
2431 	int cpu;
2432 	unsigned long flags;
2433 	unsigned long mask;
2434 	struct rcu_node *rnp;
2435 
2436 	rcu_for_each_leaf_node(rsp, rnp) {
2437 		cond_resched_rcu_qs();
2438 		mask = 0;
2439 		raw_spin_lock_irqsave(&rnp->lock, flags);
2440 		smp_mb__after_unlock_lock();
2441 		if (!rcu_gp_in_progress(rsp)) {
2442 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2443 			return;
2444 		}
2445 		if (rnp->qsmask == 0) {
2446 			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
2447 			continue;
2448 		}
2449 		cpu = rnp->grplo;
2450 		bit = 1;
2451 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2452 			if ((rnp->qsmask & bit) != 0) {
2453 				if ((rnp->qsmaskinit & bit) != 0)
2454 					*isidle = false;
2455 				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2456 					mask |= bit;
2457 			}
2458 		}
2459 		if (mask != 0) {
2460 
2461 			/* rcu_report_qs_rnp() releases rnp->lock. */
2462 			rcu_report_qs_rnp(mask, rsp, rnp, flags);
2463 			continue;
2464 		}
2465 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2466 	}
2467 	rnp = rcu_get_root(rsp);
2468 	if (rnp->qsmask == 0) {
2469 		raw_spin_lock_irqsave(&rnp->lock, flags);
2470 		smp_mb__after_unlock_lock();
2471 		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2472 	}
2473 }
2474 
2475 /*
2476  * Force quiescent states on reluctant CPUs, and also detect which
2477  * CPUs are in dyntick-idle mode.
2478  */
2479 static void force_quiescent_state(struct rcu_state *rsp)
2480 {
2481 	unsigned long flags;
2482 	bool ret;
2483 	struct rcu_node *rnp;
2484 	struct rcu_node *rnp_old = NULL;
2485 
2486 	/* Funnel through hierarchy to reduce memory contention. */
2487 	rnp = __this_cpu_read(rsp->rda->mynode);
2488 	for (; rnp != NULL; rnp = rnp->parent) {
2489 		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2490 		      !raw_spin_trylock(&rnp->fqslock);
2491 		if (rnp_old != NULL)
2492 			raw_spin_unlock(&rnp_old->fqslock);
2493 		if (ret) {
2494 			rsp->n_force_qs_lh++;
2495 			return;
2496 		}
2497 		rnp_old = rnp;
2498 	}
2499 	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2500 
2501 	/* Reached the root of the rcu_node tree, acquire lock. */
2502 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
2503 	smp_mb__after_unlock_lock();
2504 	raw_spin_unlock(&rnp_old->fqslock);
2505 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2506 		rsp->n_force_qs_lh++;
2507 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2508 		return;  /* Someone beat us to it. */
2509 	}
2510 	ACCESS_ONCE(rsp->gp_flags) =
2511 		ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
2512 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2513 	rcu_gp_kthread_wake(rsp);
2514 }
2515 
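/*
 * Illustrative sketch of the funnel locking used by
 * force_quiescent_state() above (hypothetical node type, not part of
 * this file): each caller climbs from its leaf toward the root, taking
 * each level's funnel lock with a trylock and dropping the previous
 * level's lock.  A caller that returns false lost a trylock, meaning
 * some other caller on its path is already heading up to request the
 * work; a caller that returns true still holds the root's funnel lock,
 * requests the work, and then releases that lock.
 *
 *	static bool example_funnel_to_root(struct example_node *leaf)
 *	{
 *		struct example_node *np;
 *		struct example_node *prev = NULL;
 *
 *		for (np = leaf; np != NULL; np = np->parent) {
 *			bool lost = !raw_spin_trylock(&np->funnel_lock);
 *
 *			if (prev != NULL)
 *				raw_spin_unlock(&prev->funnel_lock);
 *			if (lost)
 *				return false;
 *			prev = np;
 *		}
 *		return true;
 *	}
 */
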
2516 /*
2517  * This does the RCU core processing work for the specified rcu_state
2518  * and rcu_data structures.  This may be called only from the CPU to
2519  * whom the rdp belongs.
2520  */
2521 static void
2522 __rcu_process_callbacks(struct rcu_state *rsp)
2523 {
2524 	unsigned long flags;
2525 	bool needwake;
2526 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2527 
2528 	WARN_ON_ONCE(rdp->beenonline == 0);
2529 
2530 	/* Update RCU state based on any recent quiescent states. */
2531 	rcu_check_quiescent_state(rsp, rdp);
2532 
2533 	/* Does this CPU require a not-yet-started grace period? */
2534 	local_irq_save(flags);
2535 	if (cpu_needs_another_gp(rsp, rdp)) {
2536 		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2537 		needwake = rcu_start_gp(rsp);
2538 		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2539 		if (needwake)
2540 			rcu_gp_kthread_wake(rsp);
2541 	} else {
2542 		local_irq_restore(flags);
2543 	}
2544 
2545 	/* If there are callbacks ready, invoke them. */
2546 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2547 		invoke_rcu_callbacks(rsp, rdp);
2548 
2549 	/* Do any needed deferred wakeups of rcuo kthreads. */
2550 	do_nocb_deferred_wakeup(rdp);
2551 }
2552 
2553 /*
2554  * Do RCU core processing for the current CPU.
2555  */
2556 static void rcu_process_callbacks(struct softirq_action *unused)
2557 {
2558 	struct rcu_state *rsp;
2559 
2560 	if (cpu_is_offline(smp_processor_id()))
2561 		return;
2562 	trace_rcu_utilization(TPS("Start RCU core"));
2563 	for_each_rcu_flavor(rsp)
2564 		__rcu_process_callbacks(rsp);
2565 	trace_rcu_utilization(TPS("End RCU core"));
2566 }
2567 
2568 /*
2569  * Schedule RCU callback invocation.  If the specified type of RCU
2570  * does not support RCU priority boosting, just do a direct call,
2571  * otherwise wake up the per-CPU kernel kthread.  Note that because we
2572  * are running on the current CPU with interrupts disabled, the
2573  * rcu_cpu_kthread_task cannot disappear out from under us.
2574  */
2575 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2576 {
2577 	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
2578 		return;
2579 	if (likely(!rsp->boost)) {
2580 		rcu_do_batch(rsp, rdp);
2581 		return;
2582 	}
2583 	invoke_rcu_callbacks_kthread();
2584 }
2585 
2586 static void invoke_rcu_core(void)
2587 {
2588 	if (cpu_online(smp_processor_id()))
2589 		raise_softirq(RCU_SOFTIRQ);
2590 }
2591 
2592 /*
2593  * Handle any core-RCU processing required by a call_rcu() invocation.
2594  */
2595 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2596 			    struct rcu_head *head, unsigned long flags)
2597 {
2598 	bool needwake;
2599 
2600 	/*
2601 	 * If called from an extended quiescent state, invoke the RCU
2602 	 * core in order to force a re-evaluation of RCU's idleness.
2603 	 */
2604 	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
2605 		invoke_rcu_core();
2606 
2607 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2608 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2609 		return;
2610 
2611 	/*
2612 	 * Force the grace period if too many callbacks or too long waiting.
2613 	 * Enforce hysteresis, and don't invoke force_quiescent_state()
2614 	 * if some other CPU has recently done so.  Also, don't bother
2615 	 * invoking force_quiescent_state() if the newly enqueued callback
2616 	 * is the only one waiting for a grace period to complete.
2617 	 */
2618 	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
2619 
2620 		/* Are we ignoring a completed grace period? */
2621 		note_gp_changes(rsp, rdp);
2622 
2623 		/* Start a new grace period if one not already started. */
2624 		if (!rcu_gp_in_progress(rsp)) {
2625 			struct rcu_node *rnp_root = rcu_get_root(rsp);
2626 
2627 			raw_spin_lock(&rnp_root->lock);
2628 			smp_mb__after_unlock_lock();
2629 			needwake = rcu_start_gp(rsp);
2630 			raw_spin_unlock(&rnp_root->lock);
2631 			if (needwake)
2632 				rcu_gp_kthread_wake(rsp);
2633 		} else {
2634 			/* Give the grace period a kick. */
2635 			rdp->blimit = LONG_MAX;
2636 			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2637 			    *rdp->nxttail[RCU_DONE_TAIL] != head)
2638 				force_quiescent_state(rsp);
2639 			rdp->n_force_qs_snap = rsp->n_force_qs;
2640 			rdp->qlen_last_fqs_check = rdp->qlen;
2641 		}
2642 	}
2643 }
2644 
2645 /*
2646  * RCU callback function to leak a callback.
2647  */
2648 static void rcu_leak_callback(struct rcu_head *rhp)
2649 {
2650 }
2651 
2652 /*
2653  * Helper function for call_rcu() and friends.  The cpu argument will
2654  * normally be -1, indicating "currently running CPU".  It may specify
2655  * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
2656  * is expected to specify a CPU.
2657  */
2658 static void
2659 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
2660 	   struct rcu_state *rsp, int cpu, bool lazy)
2661 {
2662 	unsigned long flags;
2663 	struct rcu_data *rdp;
2664 
2665 	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
2666 	if (debug_rcu_head_queue(head)) {
2667 		/* Probable double call_rcu(), so leak the callback. */
2668 		ACCESS_ONCE(head->func) = rcu_leak_callback;
2669 		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
2670 		return;
2671 	}
2672 	head->func = func;
2673 	head->next = NULL;
2674 
2675 	/*
2676 	 * Opportunistically note grace-period endings and beginnings.
2677 	 * Note that we might see a beginning right after we see an
2678 	 * end, but never vice versa, since this CPU has to pass through
2679 	 * a quiescent state betweentimes.
2680 	 */
2681 	local_irq_save(flags);
2682 	rdp = this_cpu_ptr(rsp->rda);
2683 
2684 	/* Add the callback to our list. */
2685 	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
2686 		int offline;
2687 
2688 		if (cpu != -1)
2689 			rdp = per_cpu_ptr(rsp->rda, cpu);
2690 		offline = !__call_rcu_nocb(rdp, head, lazy, flags);
2691 		WARN_ON_ONCE(offline);
2692 		/* __call_rcu() is illegal on an offline CPU; leak the callback. */
2693 		local_irq_restore(flags);
2694 		return;
2695 	}
2696 	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
2697 	if (lazy)
2698 		rdp->qlen_lazy++;
2699 	else
2700 		rcu_idle_count_callbacks_posted();
2701 	smp_mb();  /* Count before adding callback for rcu_barrier(). */
2702 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
2703 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2704 
2705 	if (__is_kfree_rcu_offset((unsigned long)func))
2706 		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2707 					 rdp->qlen_lazy, rdp->qlen);
2708 	else
2709 		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
2710 
2711 	/* Go handle any RCU core processing required. */
2712 	__call_rcu_core(rsp, rdp, head, flags);
2713 	local_irq_restore(flags);
2714 }
2715 
2716 /*
2717  * Queue an RCU-sched callback for invocation after a grace period.
2718  */
2719 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2720 {
2721 	__call_rcu(head, func, &rcu_sched_state, -1, 0);
2722 }
2723 EXPORT_SYMBOL_GPL(call_rcu_sched);
2724 
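/*
 * Illustrative caller-side sketch for call_rcu_sched() (hypothetical
 * structure and function names, not part of this file): embed an
 * rcu_head in the protected object and reclaim it from the callback
 * once an RCU-sched grace period has elapsed.
 *
 *	struct example_obj {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void example_reclaim(struct rcu_head *rhp)
 *	{
 *		struct example_obj *p = container_of(rhp, struct example_obj, rh);
 *
 *		kfree(p);
 *	}
 *
 *	static void example_retire(struct example_obj *p)
 *	{
 *		call_rcu_sched(&p->rh, example_reclaim);
 *	}
 */
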
2725 /*
2726  * Queue an RCU callback for invocation after a quicker grace period.
2727  */
2728 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2729 {
2730 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
2731 }
2732 EXPORT_SYMBOL_GPL(call_rcu_bh);
2733 
2734 /*
2735  * Queue an RCU callback for lazy invocation after a grace period.
2736  * This will likely be later named something like "call_rcu_lazy()",
2737  * but this change will require some way of tagging the lazy RCU
2738  * callbacks in the list of pending callbacks. Until then, this
2739  * function may only be called from __kfree_rcu().
2740  */
2741 void kfree_call_rcu(struct rcu_head *head,
2742 		    void (*func)(struct rcu_head *rcu))
2743 {
2744 	__call_rcu(head, func, rcu_state_p, -1, 1);
2745 }
2746 EXPORT_SYMBOL_GPL(kfree_call_rcu);
2747 
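/*
 * Illustrative caller-side sketch (hypothetical structure, not part of
 * this file): callers normally reach kfree_call_rcu() through the
 * kfree_rcu() macro and __kfree_rcu(), which encode the offset of the
 * rcu_head within the enclosing structure in place of a callback
 * function (see the __is_kfree_rcu_offset() check in __call_rcu()).
 *
 *	struct example_obj {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void example_drop(struct example_obj *p)
 *	{
 *		kfree_rcu(p, rh);
 *	}
 */
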
2748 /*
2749  * Because a context switch is a grace period for RCU-sched and RCU-bh,
2750  * any blocking grace-period wait automatically implies a grace period
2751  * if there is only one CPU online at any point in time during execution
2752  * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
2753  * occasionally incorrectly indicate that there are multiple CPUs online
2754  * when there was in fact only one the whole time, as this just adds
2755  * some overhead: RCU still operates correctly.
2756  */
2757 static inline int rcu_blocking_is_gp(void)
2758 {
2759 	int ret;
2760 
2761 	might_sleep();  /* Check for RCU read-side critical section. */
2762 	preempt_disable();
2763 	ret = num_online_cpus() <= 1;
2764 	preempt_enable();
2765 	return ret;
2766 }
2767 
2768 /**
2769  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
2770  *
2771  * Control will return to the caller some time after a full rcu-sched
2772  * grace period has elapsed, in other words after all currently executing
2773  * rcu-sched read-side critical sections have completed.   These read-side
2774  * critical sections are delimited by rcu_read_lock_sched() and
2775  * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
2776  * local_irq_disable(), and so on may be used in place of
2777  * rcu_read_lock_sched().
2778  *
2779  * This means that all preempt_disable code sequences, including NMI and
2780  * non-threaded hardware-interrupt handlers, in progress on entry will
2781  * have completed before this primitive returns.  However, this does not
2782  * guarantee that softirq handlers will have completed, since in some
2783  * kernels, these handlers can run in process context, and can block.
2784  *
2785  * Note that this guarantee implies further memory-ordering guarantees.
2786  * On systems with more than one CPU, when synchronize_sched() returns,
2787  * each CPU is guaranteed to have executed a full memory barrier since the
2788  * end of its last RCU-sched read-side critical section whose beginning
2789  * preceded the call to synchronize_sched().  In addition, each CPU having
2790  * an RCU read-side critical section that extends beyond the return from
2791  * synchronize_sched() is guaranteed to have executed a full memory barrier
2792  * after the beginning of synchronize_sched() and before the beginning of
2793  * that RCU read-side critical section.  Note that these guarantees include
2794  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2795  * that are executing in the kernel.
2796  *
2797  * Furthermore, if CPU A invoked synchronize_sched(), which returned
2798  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2799  * to have executed a full memory barrier during the execution of
2800  * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
2801  * again only if the system has more than one CPU).
2802  *
2803  * This primitive provides the guarantees made by the (now removed)
2804  * synchronize_kernel() API.  In contrast, synchronize_rcu() only
2805  * guarantees that rcu_read_lock() sections will have completed.
2806  * In "classic RCU", these two guarantees happen to be one and
2807  * the same, but can differ in realtime RCU implementations.
2808  */
2809 void synchronize_sched(void)
2810 {
2811 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2812 			   !lock_is_held(&rcu_lock_map) &&
2813 			   !lock_is_held(&rcu_sched_lock_map),
2814 			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
2815 	if (rcu_blocking_is_gp())
2816 		return;
2817 	if (rcu_expedited)
2818 		synchronize_sched_expedited();
2819 	else
2820 		wait_rcu_gp(call_rcu_sched);
2821 }
2822 EXPORT_SYMBOL_GPL(synchronize_sched);
2823 
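/*
 * Illustrative update-side sketch for synchronize_sched() (hypothetical
 * pointer, lock, and function names, not part of this file): unpublish
 * the old version of an object, wait for all pre-existing
 * preempt-disabled readers to finish, then reclaim it directly.
 * Readers would use rcu_read_lock_sched()/rcu_dereference_sched() or
 * plain preempt_disable() sections.
 *
 *	struct example_obj {
 *		int data;
 *	};
 *
 *	struct example_obj __rcu *example_gp;
 *	DEFINE_SPINLOCK(example_lock);
 *
 *	static void example_replace(struct example_obj *newp)
 *	{
 *		struct example_obj *old;
 *
 *		spin_lock(&example_lock);
 *		old = rcu_dereference_protected(example_gp,
 *						lockdep_is_held(&example_lock));
 *		rcu_assign_pointer(example_gp, newp);
 *		spin_unlock(&example_lock);
 *		synchronize_sched();
 *		kfree(old);
 *	}
 */
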
2824 /**
2825  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
2826  *
2827  * Control will return to the caller some time after a full rcu_bh grace
2828  * period has elapsed, in other words after all currently executing rcu_bh
2829  * read-side critical sections have completed.  RCU read-side critical
2830  * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
2831  * and may be nested.
2832  *
2833  * See the description of synchronize_sched() for more detailed information
2834  * on memory ordering guarantees.
2835  */
2836 void synchronize_rcu_bh(void)
2837 {
2838 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2839 			   !lock_is_held(&rcu_lock_map) &&
2840 			   !lock_is_held(&rcu_sched_lock_map),
2841 			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
2842 	if (rcu_blocking_is_gp())
2843 		return;
2844 	if (rcu_expedited)
2845 		synchronize_rcu_bh_expedited();
2846 	else
2847 		wait_rcu_gp(call_rcu_bh);
2848 }
2849 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
2850 
2851 /**
2852  * get_state_synchronize_rcu - Snapshot current RCU state
2853  *
2854  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
2855  * to determine whether or not a full grace period has elapsed in the
2856  * meantime.
2857  */
2858 unsigned long get_state_synchronize_rcu(void)
2859 {
2860 	/*
2861 	 * Any prior manipulation of RCU-protected data must happen
2862 	 * before the load from ->gpnum.
2863 	 */
2864 	smp_mb();  /* ^^^ */
2865 
2866 	/*
2867 	 * Make sure this load happens before the purportedly
2868 	 * time-consuming work between get_state_synchronize_rcu()
2869 	 * and cond_synchronize_rcu().
2870 	 */
2871 	return smp_load_acquire(&rcu_state_p->gpnum);
2872 }
2873 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
2874 
2875 /**
2876  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
2877  *
2878  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
2879  *
2880  * If a full RCU grace period has elapsed since the earlier call to
2881  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
2882  * synchronize_rcu() to wait for a full grace period.
2883  *
2884  * Yes, this function does not take counter wrap into account.  But
2885  * counter wrap is harmless.  If the counter wraps, we have waited for
2886  * more than 2 billion grace periods (and way more on a 64-bit system!),
2887  * so waiting for one additional grace period should be just fine.
2888  */
2889 void cond_synchronize_rcu(unsigned long oldstate)
2890 {
2891 	unsigned long newstate;
2892 
2893 	/*
2894 	 * Ensure that this load happens before any RCU-destructive
2895 	 * actions the caller might carry out after we return.
2896 	 */
2897 	newstate = smp_load_acquire(&rcu_state_p->completed);
2898 	if (ULONG_CMP_GE(oldstate, newstate))
2899 		synchronize_rcu();
2900 }
2901 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
2902 
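/*
 * Illustrative sketch combining the two functions above (the lengthy
 * work is a hypothetical placeholder, not part of this file): snapshot
 * the grace-period state, do some time-consuming work, then wait only
 * if no full grace period elapsed in the meantime.
 *
 *	unsigned long gp_snap;
 *
 *	gp_snap = get_state_synchronize_rcu();
 *	example_do_lengthy_work();
 *	cond_synchronize_rcu(gp_snap);
 */
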
2903 static int synchronize_sched_expedited_cpu_stop(void *data)
2904 {
2905 	/*
2906 	 * There must be a full memory barrier on each affected CPU
2907 	 * between the time that try_stop_cpus() is called and the
2908 	 * time that it returns.
2909 	 *
2910 	 * In the current initial implementation of cpu_stop, the
2911 	 * above condition is already met when the control reaches
2912 	 * this point and the following smp_mb() is not strictly
2913 	 * necessary.  Do smp_mb() anyway for documentation and
2914 	 * robustness against future implementation changes.
2915 	 */
2916 	smp_mb(); /* See above comment block. */
2917 	return 0;
2918 }
2919 
2920 /**
2921  * synchronize_sched_expedited - Brute-force RCU-sched grace period
2922  *
2923  * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
2924  * approach to force the grace period to end quickly.  This consumes
2925  * significant time on all CPUs and is unfriendly to real-time workloads,
2926  * and is thus not recommended for any sort of common-case code.  In fact,
2927  * if you are using synchronize_sched_expedited() in a loop, please
2928  * restructure your code to batch your updates, and then use a single
2929  * synchronize_sched() instead.
2930  *
2931  * This implementation can be thought of as an application of ticket
2932  * locking to RCU, with the ->expedited_start and ->expedited_done
2933  * counters taking on the roles of the halves of the ticket-lock word.
2934  * Each task atomically increments ->expedited_start upon entry,
2935  * snapshotting the old value,
2936  * then attempts to stop all the CPUs.  If this succeeds, then each
2937  * CPU will have executed a context switch, resulting in an RCU-sched
2938  * grace period.  We are then done, so we use atomic_long_cmpxchg() to
2939  * update ->expedited_done to match our snapshot -- but
2940  * only if someone else has not already advanced past our snapshot.
2941  *
2942  * On the other hand, if try_stop_cpus() fails, we check the value
2943  * of ->expedited_done.  If it has advanced past our
2944  * initial snapshot, then someone else must have forced a grace period
2945  * some time after we took our snapshot.  In this case, our work is
2946  * done for us, and we can simply return.  Otherwise, we try again,
2947  * but keep our initial snapshot for purposes of checking for someone
2948  * doing our work for us.
2949  *
2950  * If we fail too many times in a row, we fall back to synchronize_sched().
2951  */
2952 void synchronize_sched_expedited(void)
2953 {
2954 	cpumask_var_t cm;
2955 	bool cma = false;
2956 	int cpu;
2957 	long firstsnap, s, snap;
2958 	int trycount = 0;
2959 	struct rcu_state *rsp = &rcu_sched_state;
2960 
2961 	/*
2962 	 * If we are in danger of counter wrap, just do synchronize_sched().
2963 	 * By allowing ->expedited_start to advance no more than
2964 	 * ULONG_MAX/8 ahead of ->expedited_done, we are ensuring
2965 	 * that more than 3.5 billion CPUs would be required to force a
2966 	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
2967 	 * course be required on a 64-bit system.
2968 	 */
2969 	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
2970 			 (ulong)atomic_long_read(&rsp->expedited_done) +
2971 			 ULONG_MAX / 8)) {
2972 		synchronize_sched();
2973 		atomic_long_inc(&rsp->expedited_wrap);
2974 		return;
2975 	}
2976 
2977 	/*
2978 	 * Take a ticket.  Note that atomic_inc_return() implies a
2979 	 * full memory barrier.
2980 	 */
2981 	snap = atomic_long_inc_return(&rsp->expedited_start);
2982 	firstsnap = snap;
2983 	if (!try_get_online_cpus()) {
2984 		/* CPU hotplug operation in flight, fall back to normal GP. */
2985 		wait_rcu_gp(call_rcu_sched);
2986 		atomic_long_inc(&rsp->expedited_normal);
2987 		return;
2988 	}
2989 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
2990 
2991 	/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
2992 	cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
2993 	if (cma) {
2994 		cpumask_copy(cm, cpu_online_mask);
2995 		cpumask_clear_cpu(raw_smp_processor_id(), cm);
2996 		for_each_cpu(cpu, cm) {
2997 			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2998 
2999 			if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
3000 				cpumask_clear_cpu(cpu, cm);
3001 		}
3002 		if (cpumask_weight(cm) == 0)
3003 			goto all_cpus_idle;
3004 	}
3005 
3006 	/*
3007 	 * Each pass through the following loop attempts to force a
3008 	 * context switch on each CPU.
3009 	 */
3010 	while (try_stop_cpus(cma ? cm : cpu_online_mask,
3011 			     synchronize_sched_expedited_cpu_stop,
3012 			     NULL) == -EAGAIN) {
3013 		put_online_cpus();
3014 		atomic_long_inc(&rsp->expedited_tryfail);
3015 
3016 		/* Check to see if someone else did our work for us. */
3017 		s = atomic_long_read(&rsp->expedited_done);
3018 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
3019 			/* ensure test happens before caller kfree */
3020 			smp_mb__before_atomic(); /* ^^^ */
3021 			atomic_long_inc(&rsp->expedited_workdone1);
3022 			free_cpumask_var(cm);
3023 			return;
3024 		}
3025 
3026 		/* No joy, try again later.  Or just synchronize_sched(). */
3027 		if (trycount++ < 10) {
3028 			udelay(trycount * num_online_cpus());
3029 		} else {
3030 			wait_rcu_gp(call_rcu_sched);
3031 			atomic_long_inc(&rsp->expedited_normal);
3032 			free_cpumask_var(cm);
3033 			return;
3034 		}
3035 
3036 		/* Recheck to see if someone else did our work for us. */
3037 		s = atomic_long_read(&rsp->expedited_done);
3038 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
3039 			/* ensure test happens before caller kfree */
3040 			smp_mb__before_atomic(); /* ^^^ */
3041 			atomic_long_inc(&rsp->expedited_workdone2);
3042 			free_cpumask_var(cm);
3043 			return;
3044 		}
3045 
3046 		/*
3047 		 * Refetching ->expedited_start allows later
3048 		 * callers to piggyback on our grace period.  We retry
3049 		 * after they started, so our grace period works for them,
3050 		 * and they started after our first try, so their grace
3051 		 * period works for us.
3052 		 */
3053 		if (!try_get_online_cpus()) {
3054 			/* CPU hotplug operation in flight, use normal GP. */
3055 			wait_rcu_gp(call_rcu_sched);
3056 			atomic_long_inc(&rsp->expedited_normal);
3057 			free_cpumask_var(cm);
3058 			return;
3059 		}
3060 		snap = atomic_long_read(&rsp->expedited_start);
3061 		smp_mb(); /* ensure read is before try_stop_cpus(). */
3062 	}
3063 	atomic_long_inc(&rsp->expedited_stoppedcpus);
3064 
3065 all_cpus_idle:
3066 	free_cpumask_var(cm);
3067 
3068 	/*
3069 	 * Everyone up to our most recent fetch is covered by our grace
3070 	 * period.  Update the counter, but only if our work is still
3071 	 * relevant -- which it won't be if someone who started later
3072 	 * than we did already did their update.
3073 	 */
3074 	do {
3075 		atomic_long_inc(&rsp->expedited_done_tries);
3076 		s = atomic_long_read(&rsp->expedited_done);
3077 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
3078 			/* ensure test happens before caller kfree */
3079 			smp_mb__before_atomic(); /* ^^^ */
3080 			atomic_long_inc(&rsp->expedited_done_lost);
3081 			break;
3082 		}
3083 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
3084 	atomic_long_inc(&rsp->expedited_done_exit);
3085 
3086 	put_online_cpus();
3087 }
3088 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
3089 
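/*
 * Illustrative sketch of the ticket-style progress check described
 * above (simplified helper with hypothetical counters, not part of
 * this file): take a ticket by incrementing the start counter, and
 * treat the work as already done whenever the done counter has
 * advanced past that snapshot, using a wrap-safe comparison.
 *
 *	static bool example_work_already_done(atomic_long_t *startp,
 *					      atomic_long_t *donep)
 *	{
 *		long snap = atomic_long_inc_return(startp);
 *		long s = atomic_long_read(donep);
 *
 *		return ULONG_CMP_GE((ulong)s, (ulong)snap);
 *	}
 */
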
3090 /*
3091  * Check to see if there is any immediate RCU-related work to be done
3092  * by the current CPU, for the specified type of RCU, returning 1 if so.
3093  * The checks are in order of increasing expense: checks that can be
3094  * carried out against CPU-local state are performed first.  However,
3095  * we must check for CPU stalls first, else we might not get a chance.
3096  */
3097 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3098 {
3099 	struct rcu_node *rnp = rdp->mynode;
3100 
3101 	rdp->n_rcu_pending++;
3102 
3103 	/* Check for CPU stalls, if enabled. */
3104 	check_cpu_stall(rsp, rdp);
3105 
3106 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
3107 	if (rcu_nohz_full_cpu(rsp))
3108 		return 0;
3109 
3110 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3111 	if (rcu_scheduler_fully_active &&
3112 	    rdp->qs_pending && !rdp->passed_quiesce) {
3113 		rdp->n_rp_qs_pending++;
3114 	} else if (rdp->qs_pending && rdp->passed_quiesce) {
3115 		rdp->n_rp_report_qs++;
3116 		return 1;
3117 	}
3118 
3119 	/* Does this CPU have callbacks ready to invoke? */
3120 	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
3121 		rdp->n_rp_cb_ready++;
3122 		return 1;
3123 	}
3124 
3125 	/* Has RCU gone idle with this CPU needing another grace period? */
3126 	if (cpu_needs_another_gp(rsp, rdp)) {
3127 		rdp->n_rp_cpu_needs_gp++;
3128 		return 1;
3129 	}
3130 
3131 	/* Has another RCU grace period completed?  */
3132 	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
3133 		rdp->n_rp_gp_completed++;
3134 		return 1;
3135 	}
3136 
3137 	/* Has a new RCU grace period started? */
3138 	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
3139 		rdp->n_rp_gp_started++;
3140 		return 1;
3141 	}
3142 
3143 	/* Does this CPU need a deferred NOCB wakeup? */
3144 	if (rcu_nocb_need_deferred_wakeup(rdp)) {
3145 		rdp->n_rp_nocb_defer_wakeup++;
3146 		return 1;
3147 	}
3148 
3149 	/* nothing to do */
3150 	rdp->n_rp_need_nothing++;
3151 	return 0;
3152 }
3153 
3154 /*
3155  * Check to see if there is any immediate RCU-related work to be done
3156  * by the current CPU, returning 1 if so.  This function is part of the
3157  * RCU implementation; it is -not- an exported member of the RCU API.
3158  */
3159 static int rcu_pending(void)
3160 {
3161 	struct rcu_state *rsp;
3162 
3163 	for_each_rcu_flavor(rsp)
3164 		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3165 			return 1;
3166 	return 0;
3167 }
3168 
3169 /*
3170  * Return true if the current CPU has any callback.  If all_lazy is
3171  * non-NULL, store an indication of whether all callbacks are lazy.
3172  * (If there are no callbacks, all of them are deemed to be lazy.)
3173  */
3174 static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3175 {
3176 	bool al = true;
3177 	bool hc = false;
3178 	struct rcu_data *rdp;
3179 	struct rcu_state *rsp;
3180 
3181 	for_each_rcu_flavor(rsp) {
3182 		rdp = this_cpu_ptr(rsp->rda);
3183 		if (!rdp->nxtlist)
3184 			continue;
3185 		hc = true;
3186 		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
3187 			al = false;
3188 			break;
3189 		}
3190 	}
3191 	if (all_lazy)
3192 		*all_lazy = al;
3193 	return hc;
3194 }
3195 
3196 /*
3197  * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
3198  * the compiler is expected to optimize this away.
3199  */
3200 static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3201 			       int cpu, unsigned long done)
3202 {
3203 	trace_rcu_barrier(rsp->name, s, cpu,
3204 			  atomic_read(&rsp->barrier_cpu_count), done);
3205 }
3206 
3207 /*
3208  * RCU callback function for _rcu_barrier().  If we are last, wake
3209  * up the task executing _rcu_barrier().
3210  */
3211 static void rcu_barrier_callback(struct rcu_head *rhp)
3212 {
3213 	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3214 	struct rcu_state *rsp = rdp->rsp;
3215 
3216 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3217 		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
3218 		complete(&rsp->barrier_completion);
3219 	} else {
3220 		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
3221 	}
3222 }
3223 
3224 /*
3225  * Called with preemption disabled, and from cross-cpu IRQ context.
3226  */
3227 static void rcu_barrier_func(void *type)
3228 {
3229 	struct rcu_state *rsp = type;
3230 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3231 
3232 	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
3233 	atomic_inc(&rsp->barrier_cpu_count);
3234 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
3235 }
3236 
3237 /*
3238  * Orchestrate the specified type of RCU barrier, waiting for all
3239  * RCU callbacks of the specified type to complete.
3240  */
3241 static void _rcu_barrier(struct rcu_state *rsp)
3242 {
3243 	int cpu;
3244 	struct rcu_data *rdp;
3245 	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
3246 	unsigned long snap_done;
3247 
3248 	_rcu_barrier_trace(rsp, "Begin", -1, snap);
3249 
3250 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3251 	mutex_lock(&rsp->barrier_mutex);
3252 
3253 	/*
3254 	 * Ensure that all prior references, including to ->n_barrier_done,
3255 	 * are ordered before the _rcu_barrier() machinery.
3256 	 */
3257 	smp_mb();  /* See above block comment. */
3258 
3259 	/*
3260 	 * Recheck ->n_barrier_done to see if others did our work for us.
3261 	 * This means checking ->n_barrier_done for an even-to-odd-to-even
3262 	 * transition.  The "if" expression below therefore rounds the old
3263 	 * value up to the next even number and adds two before comparing.
3264 	 */
3265 	snap_done = rsp->n_barrier_done;
3266 	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
3267 
3268 	/*
3269 	 * If the value in snap is odd, we needed to wait for the current
3270 	 * rcu_barrier() to complete, then wait for the next one, in other
3271 	 * words, we need the value of snap_done to be three larger than
3272 	 * the value of snap.  On the other hand, if the value in snap is
3273 	 * even, we only had to wait for the next rcu_barrier() to complete,
3274 	 * in other words, we need the value of snap_done to be only two
3275 	 * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
3276 	 * this for us (thank you, Linus!).
3277 	 */
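	/*
	 * For example, if snap is 4 (even), (snap + 3) & ~0x1 yields 6,
	 * which is snap + 2; if snap is 5 (odd), it yields 8, which is
	 * snap + 3, as required in each case.
	 */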
3278 	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
3279 		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
3280 		smp_mb(); /* caller's subsequent code after above check. */
3281 		mutex_unlock(&rsp->barrier_mutex);
3282 		return;
3283 	}
3284 
3285 	/*
3286 	 * Increment ->n_barrier_done to avoid duplicate work.  Use
3287 	 * ACCESS_ONCE() to prevent the compiler from speculating
3288 	 * the increment to precede the early-exit check.
3289 	 */
3290 	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
3291 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
3292 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
3293 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
3294 
3295 	/*
3296 	 * Initialize the count to one rather than to zero in order to
3297 	 * avoid a too-soon return to zero in case of a short grace period
3298 	 * (or preemption of this task).  Exclude CPU-hotplug operations
3299 	 * to ensure that no offline CPU has callbacks queued.
3300 	 */
3301 	init_completion(&rsp->barrier_completion);
3302 	atomic_set(&rsp->barrier_cpu_count, 1);
3303 	get_online_cpus();
3304 
3305 	/*
3306 	 * Force each CPU with callbacks to register a new callback.
3307 	 * When that callback is invoked, we will know that all of the
3308 	 * corresponding CPU's preceding callbacks have been invoked.
3309 	 */
3310 	for_each_possible_cpu(cpu) {
3311 		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3312 			continue;
3313 		rdp = per_cpu_ptr(rsp->rda, cpu);
3314 		if (rcu_is_nocb_cpu(cpu)) {
3315 			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3316 				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
3317 						   rsp->n_barrier_done);
3318 			} else {
3319 				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3320 						   rsp->n_barrier_done);
3321 				atomic_inc(&rsp->barrier_cpu_count);
3322 				__call_rcu(&rdp->barrier_head,
3323 					   rcu_barrier_callback, rsp, cpu, 0);
3324 			}
3325 		} else if (ACCESS_ONCE(rdp->qlen)) {
3326 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
3327 					   rsp->n_barrier_done);
3328 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3329 		} else {
3330 			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3331 					   rsp->n_barrier_done);
3332 		}
3333 	}
3334 	put_online_cpus();
3335 
3336 	/*
3337 	 * Now that we have an rcu_barrier_callback() callback on each
3338 	 * CPU, and thus have each one counted, remove the initial count.
3339 	 */
3340 	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3341 		complete(&rsp->barrier_completion);
3342 
3343 	/* Increment ->n_barrier_done to prevent duplicate work. */
3344 	smp_mb(); /* Keep increment after above mechanism. */
3345 	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
3346 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
3347 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
3348 	smp_mb(); /* Keep increment before caller's subsequent code. */
3349 
3350 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3351 	wait_for_completion(&rsp->barrier_completion);
3352 
3353 	/* Other rcu_barrier() invocations can now safely proceed. */
3354 	mutex_unlock(&rsp->barrier_mutex);
3355 }
3356 
3357 /**
3358  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
3359  */
3360 void rcu_barrier_bh(void)
3361 {
3362 	_rcu_barrier(&rcu_bh_state);
3363 }
3364 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3365 
3366 /**
3367  * rcu_barrier_sched - Wait until all in-flight call_rcu_sched() callbacks complete.
3368  */
3369 void rcu_barrier_sched(void)
3370 {
3371 	_rcu_barrier(&rcu_sched_state);
3372 }
3373 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
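/*
 * A minimal sketch of the classic use of these barriers; the hypothetical
 * foo_exit() and foo_stop_queueing_callbacks() below are illustrative
 * only.  A module that has posted call_rcu_sched() callbacks must wait
 * for all of them to be invoked before the module text containing the
 * callback functions is unloaded.
 */
#if 0	/* Hypothetical module-exit helpers, for illustration only. */
static void __exit foo_exit(void)
{
	foo_stop_queueing_callbacks();	/* No new call_rcu_sched() from here on. */
	rcu_barrier_sched();		/* Wait for all outstanding callbacks. */
	/* The module's callback functions can no longer be invoked. */
}
#endif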
3374 
3375 /*
3376  * Do boot-time initialization of a CPU's per-CPU RCU data.
3377  */
3378 static void __init
3379 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3380 {
3381 	unsigned long flags;
3382 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3383 	struct rcu_node *rnp = rcu_get_root(rsp);
3384 
3385 	/* Set up local state, ensuring consistent view of global state. */
3386 	raw_spin_lock_irqsave(&rnp->lock, flags);
3387 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3388 	init_callback_list(rdp);
3389 	rdp->qlen_lazy = 0;
3390 	ACCESS_ONCE(rdp->qlen) = 0;
3391 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3392 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3393 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
3394 	rdp->cpu = cpu;
3395 	rdp->rsp = rsp;
3396 	rcu_boot_init_nocb_percpu_data(rdp);
3397 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
3398 }
3399 
3400 /*
3401  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
3402  * offline event can be happening at a given time.  Note also that we
3403  * can accept some slop in the rsp->completed access due to the fact
3404  * that this CPU cannot possibly have any RCU callbacks in flight yet.
3405  */
3406 static void
3407 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3408 {
3409 	unsigned long flags;
3410 	unsigned long mask;
3411 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3412 	struct rcu_node *rnp = rcu_get_root(rsp);
3413 
3414 	/* Exclude new grace periods. */
3415 	mutex_lock(&rsp->onoff_mutex);
3416 
3417 	/* Set up local state, ensuring consistent view of global state. */
3418 	raw_spin_lock_irqsave(&rnp->lock, flags);
3419 	rdp->beenonline = 1;	 /* We have now been online. */
3420 	rdp->qlen_last_fqs_check = 0;
3421 	rdp->n_force_qs_snap = rsp->n_force_qs;
3422 	rdp->blimit = blimit;
3423 	init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
3424 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3425 	rcu_sysidle_init_percpu_data(rdp->dynticks);
3426 	atomic_set(&rdp->dynticks->dynticks,
3427 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
3428 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
3429 
3430 	/* Add CPU to rcu_node bitmasks. */
3431 	rnp = rdp->mynode;
3432 	mask = rdp->grpmask;
3433 	do {
3434 		/* Exclude any attempts to start a new GP on small systems. */
3435 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
3436 		rnp->qsmaskinit |= mask;
3437 		mask = rnp->grpmask;
3438 		if (rnp == rdp->mynode) {
3439 			/*
3440 			 * If there is a grace period in progress, we will
3441 			 * set up to wait for it next time we run the
3442 			 * RCU core code.
3443 			 */
3444 			rdp->gpnum = rnp->completed;
3445 			rdp->completed = rnp->completed;
3446 			rdp->passed_quiesce = 0;
3447 			rdp->qs_pending = 0;
3448 			trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3449 		}
3450 		raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
3451 		rnp = rnp->parent;
3452 	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
3453 	local_irq_restore(flags);
3454 
3455 	mutex_unlock(&rsp->onoff_mutex);
3456 }
3457 
3458 static void rcu_prepare_cpu(int cpu)
3459 {
3460 	struct rcu_state *rsp;
3461 
3462 	for_each_rcu_flavor(rsp)
3463 		rcu_init_percpu_data(cpu, rsp);
3464 }
3465 
3466 /*
3467  * Handle CPU online/offline notification events.
3468  */
3469 static int rcu_cpu_notify(struct notifier_block *self,
3470 				    unsigned long action, void *hcpu)
3471 {
3472 	long cpu = (long)hcpu;
3473 	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3474 	struct rcu_node *rnp = rdp->mynode;
3475 	struct rcu_state *rsp;
3476 
3477 	trace_rcu_utilization(TPS("Start CPU hotplug"));
3478 	switch (action) {
3479 	case CPU_UP_PREPARE:
3480 	case CPU_UP_PREPARE_FROZEN:
3481 		rcu_prepare_cpu(cpu);
3482 		rcu_prepare_kthreads(cpu);
3483 		rcu_spawn_all_nocb_kthreads(cpu);
3484 		break;
3485 	case CPU_ONLINE:
3486 	case CPU_DOWN_FAILED:
3487 		rcu_boost_kthread_setaffinity(rnp, -1);
3488 		break;
3489 	case CPU_DOWN_PREPARE:
3490 		rcu_boost_kthread_setaffinity(rnp, cpu);
3491 		break;
3492 	case CPU_DYING:
3493 	case CPU_DYING_FROZEN:
3494 		for_each_rcu_flavor(rsp)
3495 			rcu_cleanup_dying_cpu(rsp);
3496 		break;
3497 	case CPU_DEAD:
3498 	case CPU_DEAD_FROZEN:
3499 	case CPU_UP_CANCELED:
3500 	case CPU_UP_CANCELED_FROZEN:
3501 		for_each_rcu_flavor(rsp) {
3502 			rcu_cleanup_dead_cpu(cpu, rsp);
3503 			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3504 		}
3505 		break;
3506 	default:
3507 		break;
3508 	}
3509 	trace_rcu_utilization(TPS("End CPU hotplug"));
3510 	return NOTIFY_OK;
3511 }
3512 
3513 static int rcu_pm_notify(struct notifier_block *self,
3514 			 unsigned long action, void *hcpu)
3515 {
3516 	switch (action) {
3517 	case PM_HIBERNATION_PREPARE:
3518 	case PM_SUSPEND_PREPARE:
3519 		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3520 			rcu_expedited = 1;
3521 		break;
3522 	case PM_POST_HIBERNATION:
3523 	case PM_POST_SUSPEND:
3524 		rcu_expedited = 0;
3525 		break;
3526 	default:
3527 		break;
3528 	}
3529 	return NOTIFY_OK;
3530 }
3531 
3532 /*
3533  * Spawn the kthreads that handle each RCU flavor's grace periods.
3534  */
3535 static int __init rcu_spawn_gp_kthread(void)
3536 {
3537 	unsigned long flags;
3538 	struct rcu_node *rnp;
3539 	struct rcu_state *rsp;
3540 	struct task_struct *t;
3541 
3542 	rcu_scheduler_fully_active = 1;
3543 	for_each_rcu_flavor(rsp) {
3544 		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
3545 		BUG_ON(IS_ERR(t));
3546 		rnp = rcu_get_root(rsp);
3547 		raw_spin_lock_irqsave(&rnp->lock, flags);
3548 		rsp->gp_kthread = t;
3549 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
3550 	}
3551 	rcu_spawn_nocb_kthreads();
3552 	rcu_spawn_boost_kthreads();
3553 	return 0;
3554 }
3555 early_initcall(rcu_spawn_gp_kthread);
3556 
3557 /*
3558  * This function is invoked towards the end of the scheduler's initialization
3559  * process.  Before this is called, the idle task might contain
3560  * RCU read-side critical sections (during which time, this idle
3561  * task is booting the system).  After this function is called, the
3562  * idle tasks are prohibited from containing RCU read-side critical
3563  * sections.  This function also enables RCU lockdep checking.
3564  */
3565 void rcu_scheduler_starting(void)
3566 {
3567 	WARN_ON(num_online_cpus() != 1);
3568 	WARN_ON(nr_context_switches() > 0);
3569 	rcu_scheduler_active = 1;
3570 }
3571 
3572 /*
3573  * Compute the per-level fanout, either using the exact fanout specified
3574  * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
3575  */
3576 #ifdef CONFIG_RCU_FANOUT_EXACT
3577 static void __init rcu_init_levelspread(struct rcu_state *rsp)
3578 {
3579 	int i;
3580 
3581 	rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
3582 	for (i = rcu_num_lvls - 2; i >= 0; i--)
3583 		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
3584 }
3585 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
3586 static void __init rcu_init_levelspread(struct rcu_state *rsp)
3587 {
3588 	int ccur;
3589 	int cprv;
3590 	int i;
3591 
3592 	cprv = nr_cpu_ids;
3593 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3594 		ccur = rsp->levelcnt[i];
3595 		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3596 		cprv = ccur;
3597 	}
3598 }
3599 #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
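/*
 * Worked example for the balanced (non-exact) case: assuming nr_cpu_ids
 * is 96 and a two-level tree whose ->levelcnt[] is { 1, 6 }, the loop
 * above computes ->levelspread[1] = (96 + 6 - 1) / 6 = 16 CPUs per leaf
 * rcu_node and ->levelspread[0] = (6 + 1 - 1) / 1 = 6 leaves under the
 * root.  These values are illustrative, not compile-time defaults.
 */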
3600 
3601 /*
3602  * Helper function for rcu_init() that initializes one rcu_state structure.
3603  */
3604 static void __init rcu_init_one(struct rcu_state *rsp,
3605 		struct rcu_data __percpu *rda)
3606 {
3607 	static const char * const buf[] = {
3608 		"rcu_node_0",
3609 		"rcu_node_1",
3610 		"rcu_node_2",
3611 		"rcu_node_3" };  /* Match MAX_RCU_LVLS */
3612 	static const char * const fqs[] = {
3613 		"rcu_node_fqs_0",
3614 		"rcu_node_fqs_1",
3615 		"rcu_node_fqs_2",
3616 		"rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
3617 	static u8 fl_mask = 0x1;
3618 	int cpustride = 1;
3619 	int i;
3620 	int j;
3621 	struct rcu_node *rnp;
3622 
3623 	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3624 
3625 	/* Silence gcc 4.8 warning about array index out of range. */
3626 	if (rcu_num_lvls > RCU_NUM_LVLS)
3627 		panic("rcu_init_one: rcu_num_lvls overflow");
3628 
3629 	/* Initialize the level-tracking arrays. */
3630 
3631 	for (i = 0; i < rcu_num_lvls; i++)
3632 		rsp->levelcnt[i] = num_rcu_lvl[i];
3633 	for (i = 1; i < rcu_num_lvls; i++)
3634 		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3635 	rcu_init_levelspread(rsp);
3636 	rsp->flavor_mask = fl_mask;
3637 	fl_mask <<= 1;
3638 
3639 	/* Initialize the elements themselves, starting from the leaves. */
3640 
3641 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3642 		cpustride *= rsp->levelspread[i];
3643 		rnp = rsp->level[i];
3644 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
3645 			raw_spin_lock_init(&rnp->lock);
3646 			lockdep_set_class_and_name(&rnp->lock,
3647 						   &rcu_node_class[i], buf[i]);
3648 			raw_spin_lock_init(&rnp->fqslock);
3649 			lockdep_set_class_and_name(&rnp->fqslock,
3650 						   &rcu_fqs_class[i], fqs[i]);
3651 			rnp->gpnum = rsp->gpnum;
3652 			rnp->completed = rsp->completed;
3653 			rnp->qsmask = 0;
3654 			rnp->qsmaskinit = 0;
3655 			rnp->grplo = j * cpustride;
3656 			rnp->grphi = (j + 1) * cpustride - 1;
3657 			if (rnp->grphi >= nr_cpu_ids)
3658 				rnp->grphi = nr_cpu_ids - 1;
3659 			if (i == 0) {
3660 				rnp->grpnum = 0;
3661 				rnp->grpmask = 0;
3662 				rnp->parent = NULL;
3663 			} else {
3664 				rnp->grpnum = j % rsp->levelspread[i - 1];
3665 				rnp->grpmask = 1UL << rnp->grpnum;
3666 				rnp->parent = rsp->level[i - 1] +
3667 					      j / rsp->levelspread[i - 1];
3668 			}
3669 			rnp->level = i;
3670 			INIT_LIST_HEAD(&rnp->blkd_tasks);
3671 			rcu_init_one_nocb(rnp);
3672 		}
3673 	}
3674 
3675 	rsp->rda = rda;
3676 	init_waitqueue_head(&rsp->gp_wq);
3677 	rnp = rsp->level[rcu_num_lvls - 1];
3678 	for_each_possible_cpu(i) {
3679 		while (i > rnp->grphi)
3680 			rnp++;
3681 		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3682 		rcu_boot_init_percpu_data(i, rsp);
3683 	}
3684 	list_add(&rsp->flavors, &rcu_struct_flavors);
3685 }
3686 
3687 /*
3688  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3689  * replace the definitions in tree.h because those are needed to size
3690  * the ->node array in the rcu_state structure.
3691  */
3692 static void __init rcu_init_geometry(void)
3693 {
3694 	ulong d;
3695 	int i;
3696 	int j;
3697 	int n = nr_cpu_ids;
3698 	int rcu_capacity[MAX_RCU_LVLS + 1];
3699 
3700 	/*
3701 	 * Initialize any unspecified boot parameters.
3702 	 * The default values of jiffies_till_first_fqs and
3703 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
3704 	 * value (which is a function of HZ), plus one for each
3705 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3706 	 */
3707 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3708 	if (jiffies_till_first_fqs == ULONG_MAX)
3709 		jiffies_till_first_fqs = d;
3710 	if (jiffies_till_next_fqs == ULONG_MAX)
3711 		jiffies_till_next_fqs = d;
3712 
3713 	/* If the compile-time values are accurate, just leave. */
3714 	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
3715 	    nr_cpu_ids == NR_CPUS)
3716 		return;
3717 	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
3718 		rcu_fanout_leaf, nr_cpu_ids);
3719 
3720 	/*
3721 	 * Compute the number of nodes that can be handled by an rcu_node tree
3722 	 * with the given number of levels.  Setting rcu_capacity[0] makes
3723 	 * some of the arithmetic easier.
3724 	 */
3725 	rcu_capacity[0] = 1;
3726 	rcu_capacity[1] = rcu_fanout_leaf;
3727 	for (i = 2; i <= MAX_RCU_LVLS; i++)
3728 		rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
3729 
3730 	/*
3731 	 * The boot-time rcu_fanout_leaf parameter is only permitted
3732 	 * to increase the leaf-level fanout, not decrease it.  Of course,
3733 	 * the leaf-level fanout cannot exceed the number of bits in
3734 	 * the rcu_node masks.  Finally, the tree must be able to accommodate
3735 	 * the configured number of CPUs.  Complain and fall back to the
3736 	 * compile-time values if these limits are exceeded.
3737 	 */
3738 	if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
3739 	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
3740 	    n > rcu_capacity[MAX_RCU_LVLS]) {
3741 		WARN_ON(1);
3742 		return;
3743 	}
3744 
3745 	/* Calculate the number of rcu_nodes at each level of the tree. */
3746 	for (i = 1; i <= MAX_RCU_LVLS; i++)
3747 		if (n <= rcu_capacity[i]) {
3748 			for (j = 0; j <= i; j++)
3749 				num_rcu_lvl[j] =
3750 					DIV_ROUND_UP(n, rcu_capacity[i - j]);
3751 			rcu_num_lvls = i;
3752 			for (j = i + 1; j <= MAX_RCU_LVLS; j++)
3753 				num_rcu_lvl[j] = 0;
3754 			break;
3755 		}
3756 
3757 	/* Calculate the total number of rcu_node structures. */
3758 	rcu_num_nodes = 0;
3759 	for (i = 0; i <= MAX_RCU_LVLS; i++)
3760 		rcu_num_nodes += num_rcu_lvl[i];
3761 	rcu_num_nodes -= n;
3762 }
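/*
 * Worked example: assuming rcu_fanout_leaf = 16, CONFIG_RCU_FANOUT = 64,
 * and MAX_RCU_LVLS = 4 (illustrative values, not asserted defaults),
 * nr_cpu_ids = 96 gives rcu_capacity[] = { 1, 16, 1024, ... }, so the
 * level-selection loop settles on i = 2: num_rcu_lvl[] becomes
 * { 1, 6, 96, 0, 0 }, rcu_num_lvls = 2, and rcu_num_nodes ends up as
 * 103 - 96 = 7 (one root rcu_node plus six leaf rcu_nodes).
 */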
3763 
3764 void __init rcu_init(void)
3765 {
3766 	int cpu;
3767 
3768 	rcu_bootup_announce();
3769 	rcu_init_geometry();
3770 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
3771 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
3772 	__rcu_init_preempt();
3773 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
3774 
3775 	/*
3776 	 * We don't need protection against CPU-hotplug here because
3777 	 * this is called early in boot, before either interrupts
3778 	 * or the scheduler are operational.
3779 	 */
3780 	cpu_notifier(rcu_cpu_notify, 0);
3781 	pm_notifier(rcu_pm_notify, 0);
3782 	for_each_online_cpu(cpu)
3783 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
3784 
3785 	rcu_early_boot_tests();
3786 }
3787 
3788 #include "tree_plugin.h"
3789