/* xref: /openbmc/linux/kernel/rcu/tree.c (revision b34e08d5) */
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/ftrace_event.h>
#include <linux/suspend.h>

#include "tree.h"
#include "rcu.h"

MODULE_ALIAS("rcutree");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added to the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows userspace
 * tracing tools to map the string's address back to the matching
 * string.
 */
#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.call = cr, \
	.fqs_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
	.orphan_donetail = &sname##_state.orphan_donelist, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
	.name = sname##_varname, \
	.abbr = sabbr, \
}; \
DEFINE_PER_CPU(struct rcu_data, sname##_data)

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
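
/*
 * Illustrative note: each macro invocation above expands (roughly) to
 * definitions such as the following for the rcu_sched flavor, with the
 * tracepoint-string plumbing omitted:
 *
 *	struct rcu_state rcu_sched_state = {
 *		.call = call_rcu_sched,
 *		.name = "rcu_sched",
 *		.abbr = 's',
 *		...
 *	};
 *	DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 */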

static struct rcu_state *rcu_state;
LIST_HEAD(rcu_struct_flavors);

/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
	NUM_RCU_LVL_0,
	NUM_RCU_LVL_1,
	NUM_RCU_LVL_2,
	NUM_RCU_LVL_3,
	NUM_RCU_LVL_4,
};
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable transitions from zero to one just
 * before the first task is spawned.  So when this variable is zero, RCU
 * can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_sched() to a simple barrier().  When this variable
 * is one, RCU must actually do all the hard work required to detect real
 * grace periods.  This variable is also used to suppress boot-time false
 * positives from lockdep-RCU error checking.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

#ifdef CONFIG_RCU_BOOST

/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so it has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}
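
/*
 * Illustrative note: ->gpnum is incremented when a grace period starts
 * and ->completed is advanced to ->gpnum when it ends, so the two fields
 * are equal whenever RCU is idle.  For example, while grace period 42 is
 * in progress, ->gpnum == 42 and ->completed == 41, making the comparison
 * above return true.
 */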

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
	rdp->passed_quiesce = 1;
}

void rcu_bh_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
	rdp->passed_quiesce = 1;
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled preemption.
 */
void rcu_note_context_switch(int cpu)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch(cpu);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
	.dynticks_idle = ATOMIC_INIT(1),
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};
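
/*
 * Illustrative note: the ->dynticks counter above follows an even/odd
 * protocol.  It is incremented once on each idle transition, so its value
 * is odd (initially 1) while the CPU is active and even while the CPU is
 * in an extended quiescent state.  rcu_eqs_enter_common() and
 * rcu_eqs_exit_common() below each perform one atomic_inc() and then
 * WARN if the resulting parity is not the expected one.
 */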

static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000;	/* If this many pending, ignore blimit. */
static long qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
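
/*
 * Illustrative example (values made up): because of the "rcutree."
 * MODULE_PARAM_PREFIX defined above, these knobs can be set on the
 * kernel command line, for example:
 *
 *	rcutree.blimit=20 rcutree.jiffies_till_first_fqs=3
 *
 * The two fqs parameters are 0644, so they can also be adjusted at
 * runtime through /sys/module/rcutree/parameters/.
 */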

static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp,
			 int (*f)(struct rcu_data *rsp, bool *isidle,
				  unsigned long *maxj),
			 bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(int cpu);

/*
 * Return the number of RCU-sched batches processed thus far for debug & stats.
 */
long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
}

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	int i;

	if (rcu_gp_in_progress(rsp))
		return 0;  /* No, a grace period is already in progress. */
	if (rcu_nocb_needs_gp(rsp))
		return 1;  /* Yes, a no-CBs CPU needs one. */
	if (!rdp->nxttail[RCU_NEXT_TAIL])
		return 0;  /* No, this is a no-CBs (or offline) CPU. */
	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
		return 1;  /* Yes, this CPU has newly registered callbacks. */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
				 rdp->nxtcompleted[i]))
			return 1;  /* Yes, CBs for future grace period. */
	return 0; /* No grace period needed. */
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter now is zero,
 * we really have entered idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
				bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
	if (!user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle(smp_processor_id());
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic_inc();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);

	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal idle entry in RCU read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
			   "Illegal idle entry in RCU-bh read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
			   "Illegal idle entry in RCU-sched read-side critical section.");
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user)
{
	long long oldval;
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
		rdtp->dynticks_nesting = 0;
		rcu_eqs_enter_common(rdtp, oldval, user);
	} else {
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	}
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_enter(false);
	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
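
/*
 * Minimal usage sketch (illustrative only): an architecture's idle loop
 * would bracket its low-power wait roughly as follows:
 *
 *	rcu_idle_enter();
 *	... wait for an interrupt ...
 *	rcu_idle_exit();
 *
 * with interrupts taken during the wait themselves bracketed by
 * rcu_irq_enter()/rcu_irq_exit().
 */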

#ifdef CONFIG_RCU_USER_QS
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 */
void rcu_user_enter(void)
{
	rcu_eqs_enter(true);
}
#endif /* CONFIG_RCU_USER_QS */

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
	if (rdtp->dynticks_nesting)
		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_enter_common(rdtp, oldval, true);
	rcu_sysidle_enter(rdtp, 1);
	local_irq_restore(flags);
}

/*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
			       int user)
{
	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic_inc();  /* See above. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle(smp_processor_id());
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (!user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK) {
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	} else {
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
		rcu_eqs_exit_common(rdtp, oldval, user);
	}
}
550 /**
551  * rcu_idle_exit - inform RCU that current CPU is leaving idle
552  *
553  * Exit idle mode, in other words, -enter- the mode in which RCU
554  * read-side critical sections can occur.
555  *
556  * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
557  * allow for the possibility of usermode upcalls messing up our count
558  * of interrupt nesting level during the busy period that is just
559  * now starting.
560  */
561 void rcu_idle_exit(void)
562 {
563 	unsigned long flags;
564 
565 	local_irq_save(flags);
566 	rcu_eqs_exit(false);
567 	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
568 	local_irq_restore(flags);
569 }
570 EXPORT_SYMBOL_GPL(rcu_idle_exit);
571 
572 #ifdef CONFIG_RCU_USER_QS
573 /**
574  * rcu_user_exit - inform RCU that we are exiting userspace.
575  *
576  * Exit RCU idle mode while entering the kernel because it can
577  * run a RCU read side critical section anytime.
578  */
579 void rcu_user_exit(void)
580 {
581 	rcu_eqs_exit(1);
582 }
583 #endif /* CONFIG_RCU_USER_QS */
584 
585 /**
586  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
587  *
588  * Enter an interrupt handler, which might possibly result in exiting
589  * idle mode, in other words, entering the mode in which read-side critical
590  * sections can occur.
591  *
592  * Note that the Linux kernel is fully capable of entering an interrupt
593  * handler that it never exits, for example when doing upcalls to
594  * user mode!  This code assumes that the idle loop never does upcalls to
595  * user mode.  If your architecture does do upcalls from the idle loop (or
596  * does anything else that results in unbalanced calls to the irq_enter()
597  * and irq_exit() functions), RCU will give you what you deserve, good
598  * and hard.  But very infrequently and irreproducibly.
599  *
600  * Use things like work queues to work around this limitation.
601  *
602  * You have been warned.
603  */
604 void rcu_irq_enter(void)
605 {
606 	unsigned long flags;
607 	struct rcu_dynticks *rdtp;
608 	long long oldval;
609 
610 	local_irq_save(flags);
611 	rdtp = this_cpu_ptr(&rcu_dynticks);
612 	oldval = rdtp->dynticks_nesting;
613 	rdtp->dynticks_nesting++;
614 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
615 	if (oldval)
616 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
617 	else
618 		rcu_eqs_exit_common(rdtp, oldval, true);
619 	rcu_sysidle_exit(rdtp, 1);
620 	local_irq_restore(flags);
621 }

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi_nesting and
 * rdtp->dynticks to let the RCU grace-period handling know that the
 * CPU is active.
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 &&
	    (atomic_read(&rdtp->dynticks) & 0x1))
		return;
	rdtp->dynticks_nmi_nesting++;
	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic_inc();  /* See above. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi_nesting and
 * rdtp->dynticks to let the RCU grace-period handling know that the
 * CPU is no longer active.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 ||
	    --rdtp->dynticks_nmi_nesting != 0)
		return;
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic_inc();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic_inc();  /* Force delay to next write. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
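
/*
 * Illustrative NMI sequences: an NMI arriving while the CPU is non-idle
 * (->dynticks odd) returns early from rcu_nmi_enter() without touching
 * ->dynticks_nmi_nesting, so rcu_nmi_exit() also returns early.  Only an
 * NMI taken from dyntick-idle context increments ->dynticks on entry
 * (marking the CPU active) and again on exit (marking it idle again).
 */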

/**
 * __rcu_is_watching - are RCU read-side critical sections safe?
 *
 * Return true if RCU is watching the running CPU, which means that
 * this CPU can safely enter RCU read-side critical sections.  Unlike
 * rcu_is_watching(), the caller of __rcu_is_watching() must have at
 * least disabled preemption.
 */
bool notrace __rcu_is_watching(void)
{
	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * If the current CPU is in its idle loop and is neither in an interrupt
 * nor an NMI handler, return false; otherwise return true.
 */
bool notrace rcu_is_watching(void)
{
	int ret;

	preempt_disable();
	ret = __rcu_is_watching();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
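
/*
 * Illustrative usage: code that might run from either idle or non-idle
 * context can guard its RCU read-side accesses as follows:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		... access RCU-protected data ...
 *		rcu_read_unlock();
 *	}
 */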

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 * notifiers.
 *
 * This is also why RCU internally marks CPUs online during the
 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rnp->qsmaskinit) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}
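
/*
 * Worked example (illustrative): in the idle loop, ->dynticks_nesting
 * is 0; a first-level interrupt from idle raises it to 1 via
 * rcu_irq_enter(), so the "<= 1" test above covers exactly those two
 * cases.  A nested interrupt would see a value of 2 or more, and the
 * function would return false.
 */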

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp,
					 bool *isidle, unsigned long *maxj)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	rcu_sysidle_check_cpu(rdp, isidle, maxj);
	return (rdp->dynticks_snap & 0x1) == 0;
}
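
/*
 * Design note (illustrative): atomic_add_return(0, ...) is used above
 * rather than atomic_read() because it implies full memory ordering,
 * so the snapshot cannot be reordered with this CPU's surrounding
 * observations of the remote CPU's state.
 */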

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
				    bool *isidle, unsigned long *maxj)
{
	unsigned int curr;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
		return 1;
	}

	/*
	 * Check for the CPU being offline, but only if the grace period
	 * is old enough.  We don't need to worry about the CPU changing
	 * state: If we see it offline even once, it has been through a
	 * quiescent state.
	 *
	 * The reason for insisting that the grace period be at least
	 * one jiffy old is that CPUs that are not quite online and that
	 * have just gone offline can still execute RCU read-side critical
	 * sections.
	 */
	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
		return 0;  /* Grace period is not old enough. */
	barrier();
	if (cpu_is_offline(rdp->cpu)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
		return 1;
	}

	/*
	 * There is a possibility that a CPU in adaptive-ticks state
	 * might run in the kernel with the scheduling-clock tick disabled
	 * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
	 * force the CPU to restart the scheduling-clock tick if this
	 * CPU is in this state.
	 */
	rcu_kick_nohz_cpu(rdp->cpu);

	/*
	 * Alternatively, the CPU might be running in the kernel
	 * for an extended period of time without a quiescent state.
	 * Attempt to force the CPU through the scheduler to gain the
	 * needed quiescent state, but only if the grace period has gone
	 * on for an uncommonly long time.  If there are many stuck CPUs,
	 * we will beat on the first one until it gets unstuck, then move
	 * to the next.  Only do this for the primary flavor of RCU.
	 */
	if (rdp->rsp == rcu_state &&
	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
		rdp->rsp->jiffies_resched += 5;
		resched_cpu(rdp->cpu);
	}

	return 0;
}

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
	j1 = rcu_jiffies_till_stall_check();
	rsp->jiffies_stall = j + j1;
	rsp->jiffies_resched = j + j1 / 2;
}
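
/*
 * Note (illustrative): ->jiffies_resched is set to half the stall-warning
 * timeout so that rcu_implicit_dynticks_qs() above begins nudging holdout
 * CPUs with resched_cpu() well before a stall warning would be printed.
 */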

/*
 * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
 * for architectures that do not implement trigger_all_cpu_backtrace().
 * The NMI-triggered stack traces are more accurate because they are
 * printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu))
					dump_cpu_task(rnp->grplo + cpu);
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu)) {
					print_cpu_stall_info(rsp,
							     rnp->grplo + cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	rnp = rcu_get_root(rsp);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	ndetected += rcu_print_task_stall(rnp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
	pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
	       rsp->gpnum, rsp->completed, totqlen);
	if (ndetected == 0)
		pr_err("INFO: Stall ended before state dump start\n");
	else if (!trigger_all_cpu_backtrace())
		rcu_dump_cpu_stacks(rsp);

	/* Complain about tasks blocking the grace period. */

	rcu_print_detail_task_stall(rsp);

	force_quiescent_state(rsp);  /* Kick them all. */
}

951  * This function really isn't for public consumption, but RCU is special in
952  * that context switches can allow the state machine to make progress.
953  */
954 extern void resched_cpu(int cpu);
955 
956 static void print_cpu_stall(struct rcu_state *rsp)
957 {
958 	int cpu;
959 	unsigned long flags;
960 	struct rcu_node *rnp = rcu_get_root(rsp);
961 	long totqlen = 0;
962 
963 	/*
964 	 * OK, time to rat on ourselves...
965 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
966 	 * RCU CPU stall warnings.
967 	 */
968 	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
969 	print_cpu_stall_info_begin();
970 	print_cpu_stall_info(rsp, smp_processor_id());
971 	print_cpu_stall_info_end();
972 	for_each_possible_cpu(cpu)
973 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
974 	pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
975 		jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
976 	if (!trigger_all_cpu_backtrace())
977 		dump_stack();
978 
979 	raw_spin_lock_irqsave(&rnp->lock, flags);
980 	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
981 		rsp->jiffies_stall = jiffies +
982 				     3 * rcu_jiffies_till_stall_check() + 3;
983 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
984 
985 	/*
986 	 * Attempt to revive the RCU machinery by forcing a context switch.
987 	 *
988 	 * A context switch would normally allow the RCU state machine to make
989 	 * progress and it could be we're stuck in kernel space without context
990 	 * switches for an entirely unreasonable amount of time.
991 	 */
992 	resched_cpu(smp_processor_id());
993 }
994 
995 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
996 {
997 	unsigned long completed;
998 	unsigned long gpnum;
999 	unsigned long gps;
1000 	unsigned long j;
1001 	unsigned long js;
1002 	struct rcu_node *rnp;
1003 
1004 	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
1005 		return;
1006 	j = jiffies;
1007 
1008 	/*
1009 	 * Lots of memory barriers to reject false positives.
1010 	 *
1011 	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1012 	 * then rsp->gp_start, and finally rsp->completed.  These values
1013 	 * are updated in the opposite order with memory barriers (or
1014 	 * equivalent) during grace-period initialization and cleanup.
	 * Now, a false positive can occur if we get a new value of
	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
	 * the memory barriers, the only way that this can happen is if one
	 * grace period ends and another starts between these two fetches.
	 * Detect this by comparing rsp->completed with the previous fetch
	 * from rsp->gpnum.
	 *
	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
	 * and rsp->gp_start suffice to forestall false positives.
	 */
	gpnum = ACCESS_ONCE(rsp->gpnum);
	smp_rmb(); /* Pick up ->gpnum first... */
	js = ACCESS_ONCE(rsp->jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = ACCESS_ONCE(rsp->gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
	completed = ACCESS_ONCE(rsp->completed);
	if (ULONG_CMP_GE(completed, gpnum) ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	if (rcu_gp_in_progress(rsp) &&
	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp);
	}
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Initialize the specified rcu_data structure's callback list to empty.
 */
static void init_callback_list(struct rcu_data *rdp)
{
	int i;

	if (init_nocb_callback_list(rdp))
		return;
	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
}
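
/*
 * Illustrative layout of the per-CPU callback list initialized above and
 * manipulated by the acceleration/advancement functions below.  The list
 * is singly linked and divided into four sublists by the ->nxttail[]
 * pointers:
 *
 *	->nxtlist -> [DONE cbs] -> [WAIT cbs] -> [NEXT_READY cbs] -> [NEXT cbs]
 *
 * with ->nxttail[RCU_DONE_TAIL], ->nxttail[RCU_WAIT_TAIL],
 * ->nxttail[RCU_NEXT_READY_TAIL], and ->nxttail[RCU_NEXT_TAIL] each
 * pointing to the ->next pointer that ends the corresponding sublist.
 * An empty list therefore has all four tail pointers aimed at ->nxtlist
 * itself, as set up here.
 */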

/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been dyntick-idle for an extended period with callbacks under the
 * influence of RCU_FAST_NO_HZ.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	/*
	 * If RCU is idle, we just wait for the next grace period.
	 * But we can only be sure that RCU is idle if we are looking
	 * at the root rcu_node structure -- otherwise, a new grace
	 * period might have started, but just not yet gotten around
	 * to initializing the current non-root rcu_node structure.
	 */
	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;

	/*
	 * Otherwise, wait for a possible partial grace period and
	 * then the subsequent full grace period.
	 */
	return rnp->completed + 2;
}
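
/*
 * Worked example (illustrative): if RCU is idle at the root with
 * ->completed == 41, newly arrived callbacks can be handled by grace
 * period 42, hence the "+ 1" above.  On a non-root rcu_node, grace
 * period 42 might already be underway without this node having been
 * initialized yet, so callbacks must instead wait for 43, hence the
 * "+ 2".
 */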

/*
 * Trace-event helper function for rcu_start_future_gp() and
 * rcu_nocb_wait_gp().
 */
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
				unsigned long c, const char *s)
{
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * Start some future grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->need_future_gp field.
 *
 * The caller must hold the specified rcu_node structure's ->lock.
 */
static unsigned long __maybe_unused
rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long c;
	int i;
	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);

	/*
	 * Pick up grace-period number for new callbacks.  If this
	 * grace period is already marked as needed, return to the caller.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp);
	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
	if (rnp->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
		return c;
	}

	/*
	 * If either this rcu_node structure or the root rcu_node structure
	 * believes that a grace period is in progress, then we must wait
	 * for the one following, which is in "c".  Because our request
	 * will be noticed at the end of the current grace period, we don't
	 * need to explicitly start one.
	 */
	if (rnp->gpnum != rnp->completed ||
	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
		rnp->need_future_gp[c & 0x1]++;
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
		return c;
	}

	/*
	 * There might be no grace period in progress.  If we don't already
	 * hold it, acquire the root rcu_node structure's lock in order to
	 * start one (if needed).
	 */
	if (rnp != rnp_root) {
		raw_spin_lock(&rnp_root->lock);
		smp_mb__after_unlock_lock();
	}

	/*
	 * Get a new grace-period number.  If there really is no grace
	 * period in progress, it will be smaller than the one we obtained
	 * earlier.  Adjust callbacks as needed.  Note that even no-CBs
	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp_root);
	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
			rdp->nxtcompleted[i] = c;

	/*
	 * If the need for the required grace period is already
	 * recorded, trace and leave.
	 */
	if (rnp_root->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
		goto unlock_out;
	}

	/* Record the need for the future grace period. */
	rnp_root->need_future_gp[c & 0x1]++;

	/* If a grace period is not already in progress, start one. */
	if (rnp_root->gpnum != rnp_root->completed) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
	} else {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
		rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
	}
unlock_out:
	if (rnp != rnp_root)
		raw_spin_unlock(&rnp_root->lock);
	return c;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.  Also invoke
 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
 * waiting for this grace period to complete.
 */
static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int c = rnp->completed;
	int needmore;
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

	rcu_nocb_gp_cleanup(rsp, rnp);
	rnp->need_future_gp[c & 0x1] = 0;
	needmore = rnp->need_future_gp[(c + 1) & 0x1];
	trace_rcu_future_gp(rnp, rdp, c,
			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned.  Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative, which can happen if callbacks get
 * assigned a ->completed number while RCU is idle, but with reference to
 * a non-root rcu_node structure.  This function is idempotent, so it does
 * not hurt to call it repeatedly.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
			       struct rcu_data *rdp)
{
	unsigned long c;
	int i;

	/* If the CPU has no callbacks, nothing to do. */
	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
		return;

	/*
	 * Starting from the sublist containing the callbacks most
	 * recently assigned a ->completed number and working down, find the
	 * first sublist that is not assignable to an upcoming grace period.
	 * Such a sublist has something in it (first two tests) and has
	 * a ->completed number assigned that will complete sooner than
	 * the ->completed number for newly arrived callbacks (last test).
	 *
	 * The key point is that any later sublist can be assigned the
	 * same ->completed number as the newly arrived callbacks, which
	 * means that the callbacks in any of these later sublists can be
	 * grouped into a single sublist, whether or not they have already
	 * been assigned a ->completed number.
	 */
	c = rcu_cbs_completed(rsp, rnp);
	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
			break;

	/*
	 * If there is no sublist for unassigned callbacks, leave.
	 * At the same time, advance "i" one sublist, so that "i" will
	 * index the sublist into which all the remaining callbacks
	 * should be grouped.
	 */
	if (++i >= RCU_NEXT_TAIL)
		return;

	/*
	 * Assign all subsequent callbacks' ->completed number to the next
	 * full grace period and group them all in the sublist initially
	 * indexed by "i".
	 */
	for (; i <= RCU_NEXT_TAIL; i++) {
		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
		rdp->nxtcompleted[i] = c;
	}
	/* Record any needed additional grace periods. */
	rcu_start_future_gp(rnp, rdp);

	/* Trace depending on how much we were able to accelerate. */
	if (!*rdp->nxttail[RCU_WAIT_TAIL])
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
			    struct rcu_data *rdp)
{
	int i, j;

	/* If the CPU has no callbacks, nothing to do. */
	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
		return;

	/*
	 * Find all callbacks whose ->completed numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
			break;
		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
	}
	/* Clean up any sublist tail pointers that were misordered above. */
	for (j = RCU_WAIT_TAIL; j < i; j++)
		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];

	/* Copy down callbacks to fill in empty sublists. */
	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
			break;
		rdp->nxttail[j] = rdp->nxttail[i];
		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
	}

	/* Classify any remaining callbacks. */
	rcu_accelerate_cbs(rsp, rnp, rdp);
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 */
static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* Handle the ends of any preceding grace periods first. */
	if (rdp->completed == rnp->completed) {

		/* No grace period end, so just accelerate recent callbacks. */
		rcu_accelerate_cbs(rsp, rnp, rdp);

	} else {

		/* Advance callbacks. */
		rcu_advance_cbs(rsp, rnp, rdp);

		/* Remember that we saw this grace-period completion. */
		rdp->completed = rnp->completed;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
	}

	if (rdp->gpnum != rnp->gpnum) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		rdp->gpnum = rnp->gpnum;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
		rdp->passed_quiesce = 0;
		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
		zero_cpu_stall_ticks(rdp);
	}
}

static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	smp_mb__after_unlock_lock();
	__note_gp_changes(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Initialize a new grace period.  Return 0 if no grace period required.
 */
static int rcu_gp_init(struct rcu_state *rsp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_bind_gp_kthread();
	raw_spin_lock_irq(&rnp->lock);
	smp_mb__after_unlock_lock();
	if (rsp->gp_flags == 0) {
		/* Spurious wakeup, tell caller to go back to sleep.  */
		raw_spin_unlock_irq(&rnp->lock);
		return 0;
	}
	rsp->gp_flags = 0; /* Clear all flags: New grace period. */

	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
		/*
		 * Grace period already in progress, don't start another.
		 * Not supposed to be able to happen.
		 */
		raw_spin_unlock_irq(&rnp->lock);
		return 0;
	}

	/* Advance to a new grace period and initialize state. */
	record_gp_stall_check_time(rsp);
	/* Record GP times before starting GP, hence smp_store_release(). */
	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
	raw_spin_unlock_irq(&rnp->lock);

	/* Exclude any concurrent CPU-hotplug operations. */
	mutex_lock(&rsp->onoff_mutex);
	smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first order,
	 * starting from the root rcu_node structure, relying on the layout
	 * of the tree within the rsp->node[] array.  Note that other CPUs
	 * will access only the leaves of the hierarchy, thus seeing that no
	 * grace period is in progress, at least until the corresponding
	 * leaf node has been initialized.  In addition, we have excluded
	 * CPU-hotplug operations.
	 *
	 * The grace period cannot complete until the initialization
	 * process finishes, because this kthread handles both.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
		rdp = this_cpu_ptr(rsp->rda);
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
		WARN_ON_ONCE(rnp->completed != rsp->completed);
		ACCESS_ONCE(rnp->completed) = rsp->completed;
		if (rnp == rdp->mynode)
			__note_gp_changes(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		raw_spin_unlock_irq(&rnp->lock);
#ifdef CONFIG_PROVE_RCU_DELAY
		if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
		    system_state == SYSTEM_RUNNING)
			udelay(200);
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
		cond_resched();
	}

	mutex_unlock(&rsp->onoff_mutex);
	return 1;
}

/*
 * Do one round of quiescent-state forcing.
 */
static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
{
	int fqs_state = fqs_state_in;
	bool isidle = false;
	unsigned long maxj;
	struct rcu_node *rnp = rcu_get_root(rsp);

	rsp->n_force_qs++;
	if (fqs_state == RCU_SAVE_DYNTICK) {
		/* Collect dyntick-idle snapshots. */
		if (is_sysidle_rcu_state(rsp)) {
			isidle = 1;
			maxj = jiffies - ULONG_MAX / 4;
		}
		force_qs_rnp(rsp, dyntick_save_progress_counter,
			     &isidle, &maxj);
		rcu_sysidle_report_gp(rsp, isidle, maxj);
		fqs_state = RCU_FORCE_QS;
	} else {
		/* Handle dyntick-idle and offline CPUs. */
		isidle = 0;
		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
	}
	/* Clear flag to prevent immediate re-entry. */
	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
		rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
		raw_spin_unlock_irq(&rnp->lock);
	}
	return fqs_state;
}

/*
 * Clean up after the old grace period.
 */
static void rcu_gp_cleanup(struct rcu_state *rsp)
{
	unsigned long gp_duration;
	int nocb = 0;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root(rsp);

	raw_spin_lock_irq(&rnp->lock);
	smp_mb__after_unlock_lock();
	gp_duration = jiffies - rsp->gp_start;
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;

	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing.  But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period.  It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 */
	raw_spin_unlock_irq(&rnp->lock);

	/*
	 * Propagate new ->completed value to rcu_node structures so
	 * that other CPUs don't have to wait until the start of the next
	 * grace period to process their callbacks.  This also avoids
	 * some nasty RCU grace-period initialization races by forcing
	 * the end of the current grace period to be completely recorded in
	 * all of the rcu_node structures before the beginning of the next
	 * grace period is recorded in any of the rcu_node structures.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
		rdp = this_cpu_ptr(rsp->rda);
		if (rnp == rdp->mynode)
			__note_gp_changes(rsp, rnp, rdp);
		/* smp_mb() provided by prior unlock-lock pair. */
		nocb += rcu_future_gp_cleanup(rsp, rnp);
		raw_spin_unlock_irq(&rnp->lock);
		cond_resched();
	}
	rnp = rcu_get_root(rsp);
	raw_spin_lock_irq(&rnp->lock);
	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
	rcu_nocb_gp_set(rnp, nocb);

	/* Declare grace period done. */
	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
	rsp->fqs_state = RCU_GP_IDLE;
	rdp = this_cpu_ptr(rsp->rda);
	rcu_advance_cbs(rsp, rnp, rdp);  /* Reduce false positives below. */
	if (cpu_needs_another_gp(rsp, rdp)) {
		rsp->gp_flags = RCU_GP_FLAG_INIT;
		trace_rcu_grace_period(rsp->name,
				       ACCESS_ONCE(rsp->gpnum),
				       TPS("newreq"));
	}
	raw_spin_unlock_irq(&rnp->lock);
}

1577 /*
1578  * Body of kthread that handles grace periods.
1579  */
1580 static int __noreturn rcu_gp_kthread(void *arg)
1581 {
1582 	int fqs_state;
1583 	int gf;
1584 	unsigned long j;
1585 	int ret;
1586 	struct rcu_state *rsp = arg;
1587 	struct rcu_node *rnp = rcu_get_root(rsp);
1588 
1589 	for (;;) {
1590 
1591 		/* Handle grace-period start. */
1592 		for (;;) {
1593 			trace_rcu_grace_period(rsp->name,
1594 					       ACCESS_ONCE(rsp->gpnum),
1595 					       TPS("reqwait"));
1596 			wait_event_interruptible(rsp->gp_wq,
1597 						 ACCESS_ONCE(rsp->gp_flags) &
1598 						 RCU_GP_FLAG_INIT);
1599 			/* Locking provides needed memory barrier. */
1600 			if (rcu_gp_init(rsp))
1601 				break;
1602 			cond_resched();
1603 			flush_signals(current);
1604 			trace_rcu_grace_period(rsp->name,
1605 					       ACCESS_ONCE(rsp->gpnum),
1606 					       TPS("reqwaitsig"));
1607 		}
1608 
1609 		/* Handle quiescent-state forcing. */
1610 		fqs_state = RCU_SAVE_DYNTICK;
1611 		j = jiffies_till_first_fqs;
1612 		if (j > HZ) {
1613 			j = HZ;
1614 			jiffies_till_first_fqs = HZ;
1615 		}
1616 		ret = 0;
1617 		for (;;) {
1618 			if (!ret)
1619 				rsp->jiffies_force_qs = jiffies + j;
1620 			trace_rcu_grace_period(rsp->name,
1621 					       ACCESS_ONCE(rsp->gpnum),
1622 					       TPS("fqswait"));
1623 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
1624 					((gf = ACCESS_ONCE(rsp->gp_flags)) &
1625 					 RCU_GP_FLAG_FQS) ||
1626 					(!ACCESS_ONCE(rnp->qsmask) &&
1627 					 !rcu_preempt_blocked_readers_cgp(rnp)),
1628 					j);
1629 			/* Locking provides needed memory barriers. */
1630 			/* If grace period done, leave loop. */
1631 			if (!ACCESS_ONCE(rnp->qsmask) &&
1632 			    !rcu_preempt_blocked_readers_cgp(rnp))
1633 				break;
1634 			/* If time for quiescent-state forcing, do it. */
1635 			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
1636 			    (gf & RCU_GP_FLAG_FQS)) {
1637 				trace_rcu_grace_period(rsp->name,
1638 						       ACCESS_ONCE(rsp->gpnum),
1639 						       TPS("fqsstart"));
1640 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
1641 				trace_rcu_grace_period(rsp->name,
1642 						       ACCESS_ONCE(rsp->gpnum),
1643 						       TPS("fqsend"));
1644 				cond_resched();
1645 			} else {
1646 				/* Deal with stray signal. */
1647 				cond_resched();
1648 				flush_signals(current);
1649 				trace_rcu_grace_period(rsp->name,
1650 						       ACCESS_ONCE(rsp->gpnum),
1651 						       TPS("fqswaitsig"));
1652 			}
1653 			j = jiffies_till_next_fqs;
1654 			if (j > HZ) {
1655 				j = HZ;
1656 				jiffies_till_next_fqs = HZ;
1657 			} else if (j < 1) {
1658 				j = 1;
1659 				jiffies_till_next_fqs = 1;
1660 			}
1661 		}
1662 
1663 		/* Handle grace-period end. */
1664 		rcu_gp_cleanup(rsp);
1665 	}
1666 }
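
/*
 * In outline, the kthread above cycles through three phases, sketched
 * here with tracing, signal handling, and timeout clamping elided
 * (pseudocode, for orientation only):
 *
 *	for (;;) {
 *		wait for RCU_GP_FLAG_INIT, then rcu_gp_init();
 *		while (quiescent states still outstanding)
 *			sleep up to j jiffies, then maybe rcu_gp_fqs();
 *		rcu_gp_cleanup();
 *	}
 */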
1667 
1668 static void rsp_wakeup(struct irq_work *work)
1669 {
1670 	struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
1671 
1672 	/* Wake up rcu_gp_kthread() to start the grace period. */
1673 	wake_up(&rsp->gp_wq);
1674 }
1675 
1676 /*
1677  * Start a new RCU grace period if warranted, re-initializing the hierarchy
1678  * in preparation for detecting the next grace period.  The caller must hold
1679  * the root node's ->lock and hard irqs must be disabled.
1680  *
1681  * Note that it is legal for a dying CPU (which is marked as offline) to
1682  * invoke this function.  This can happen when the dying CPU reports its
1683  * quiescent state.
1684  */
1685 static void
1686 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
1687 		      struct rcu_data *rdp)
1688 {
1689 	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
1690 		/*
1691 		 * Either we have not yet spawned the grace-period
1692 		 * task, this CPU does not need another grace period,
1693 		 * or a grace period is already in progress.
1694 		 * Either way, don't start a new grace period.
1695 		 */
1696 		return;
1697 	}
1698 	rsp->gp_flags = RCU_GP_FLAG_INIT;
1699 	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
1700 			       TPS("newreq"));
1701 
1702 	/*
1703 	 * We can't do wakeups while holding the rnp->lock, as that
1704 	 * could cause possible deadlocks with the rq->lock. Defer
1705 	 * the wakeup to interrupt context.  And don't bother waking
1706 	 * up the running kthread.
1707 	 */
1708 	if (current != rsp->gp_kthread)
1709 		irq_work_queue(&rsp->wakeup_work);
1710 }
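
/*
 * A minimal sketch of the irq_work pattern used above (hypothetical
 * names; assumes the work item was initialized once at boot):
 *
 *	static void my_wakeup(struct irq_work *work)
 *	{
 *		wake_up(&my_wq);	(runs from a self-IPI, no rq->lock)
 *	}
 *
 *	init_irq_work(&my_work, my_wakeup);	(setup, done once)
 *	irq_work_queue(&my_work);	(safe while holding rnp->lock)
 */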
1711 
1712 /*
1713  * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
1714  * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
1715  * is invoked indirectly from rcu_advance_cbs(), which would result in
1716  * endless recursion -- or would do so if it weren't for the self-deadlock
1717  * that is encountered beforehand.
1718  */
1719 static void
1720 rcu_start_gp(struct rcu_state *rsp)
1721 {
1722 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1723 	struct rcu_node *rnp = rcu_get_root(rsp);
1724 
1725 	/*
1726 	 * If there is no grace period in progress right now, any
1727 	 * callbacks we have up to this point will be satisfied by the
1728 	 * next grace period.  Also, advancing the callbacks reduces the
1729 	 * probability of false positives from cpu_needs_another_gp()
1730 	 * resulting in pointless grace periods.  So, advance callbacks
1731 	 * then start the grace period!
1732 	 */
1733 	rcu_advance_cbs(rsp, rnp, rdp);
1734 	rcu_start_gp_advanced(rsp, rnp, rdp);
1735 }
1736 
1737 /*
1738  * Report a full set of quiescent states to the specified rcu_state
1739  * data structure.  This involves cleaning up after the prior grace
1740  * period and letting rcu_start_gp() start up the next grace period
1741  * if one is needed.  Note that the caller must hold rnp->lock, which
1742  * is released before return.
1743  */
1744 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
1745 	__releases(rcu_get_root(rsp)->lock)
1746 {
1747 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
1748 	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
1749 	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
1750 }
1751 
1752 /*
1753  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1754  * Allows quiescent states for a group of CPUs to be reported at one go
1755  * to the specified rcu_node structure, though all the CPUs in the group
1756  * must be represented by the same rcu_node structure (which need not be
1757  * a leaf rcu_node structure, though it often will be).  That structure's
1758  * lock must be held upon entry, and it is released before return.
1759  */
1760 static void
1761 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1762 		  struct rcu_node *rnp, unsigned long flags)
1763 	__releases(rnp->lock)
1764 {
1765 	struct rcu_node *rnp_c;
1766 
1767 	/* Walk up the rcu_node hierarchy. */
1768 	for (;;) {
1769 		if (!(rnp->qsmask & mask)) {
1770 
1771 			/* Our bit has already been cleared, so done. */
1772 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
1773 			return;
1774 		}
1775 		rnp->qsmask &= ~mask;
1776 		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
1777 						 mask, rnp->qsmask, rnp->level,
1778 						 rnp->grplo, rnp->grphi,
1779 						 !!rnp->gp_tasks);
1780 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1781 
1782 			/* Other bits still set at this level, so done. */
1783 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
1784 			return;
1785 		}
1786 		mask = rnp->grpmask;
1787 		if (rnp->parent == NULL) {
1788 
1789 			/* No more levels.  Exit loop holding root lock. */
1790 
1791 			break;
1792 		}
1793 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1794 		rnp_c = rnp;
1795 		rnp = rnp->parent;
1796 		raw_spin_lock_irqsave(&rnp->lock, flags);
1797 		smp_mb__after_unlock_lock();
1798 		WARN_ON_ONCE(rnp_c->qsmask);
1799 	}
1800 
1801 	/*
1802 	 * Get here if we are the last CPU to pass through a quiescent
1803 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1804 	 * to clean up and start the next grace period if one is needed.
1805 	 */
1806 	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
1807 }
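
/*
 * Worked example for the loop above: take a hypothetical two-level
 * tree with leaf A covering CPUs 0-15 (grpmask 0x1 in the root) and
 * leaf B covering CPUs 16-31 (grpmask 0x2).  If CPU 3 is the last
 * holdout on A, clearing mask 0x8 drops A's ->qsmask to zero, so the
 * loop climbs with mask = A->grpmask = 0x1 and clears that bit in the
 * root.  If the root's ->qsmask also reaches zero, rcu_report_qs_rsp()
 * ends the grace period; otherwise leaf B still owes quiescent states
 * and the loop returns early at the root.
 */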
1808 
1809 /*
1810  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1811  * structure.  This must be either called from the specified CPU, or
1812  * called when the specified CPU is known to be offline (and when it is
1813  * also known that no other CPU is concurrently trying to help the offline
1814  * CPU).  The ->gpnum check in the body makes sure we are still in the
1815  * grace period of interest.  We don't want to end the current grace
1816  * period based on quiescent states detected in an earlier grace period!
1817  */
1818 static void
1819 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
1820 {
1821 	unsigned long flags;
1822 	unsigned long mask;
1823 	struct rcu_node *rnp;
1824 
1825 	rnp = rdp->mynode;
1826 	raw_spin_lock_irqsave(&rnp->lock, flags);
1827 	smp_mb__after_unlock_lock();
1828 	if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
1829 	    rnp->completed == rnp->gpnum) {
1830 
1831 		/*
1832 		 * The grace period in which this quiescent state was
1833 		 * recorded has ended, so don't report it upwards.
1834 		 * We will instead need a new quiescent state that lies
1835 		 * within the current grace period.
1836 		 */
1837 		rdp->passed_quiesce = 0;	/* need qs for new gp. */
1838 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1839 		return;
1840 	}
1841 	mask = rdp->grpmask;
1842 	if ((rnp->qsmask & mask) == 0) {
1843 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1844 	} else {
1845 		rdp->qs_pending = 0;
1846 
1847 		/*
1848 		 * This GP can't end until cpu checks in, so all of our
1849 		 * callbacks can be processed during the next GP.
1850 		 */
1851 		rcu_accelerate_cbs(rsp, rnp, rdp);
1852 
1853 		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
1854 	}
1855 }
1856 
1857 /*
1858  * Check to see if there is a new grace period of which this CPU
1859  * is not yet aware, and if so, set up local rcu_data state for it.
1860  * Otherwise, see if this CPU has just passed through its first
1861  * quiescent state for this grace period, and record that fact if so.
1862  */
1863 static void
1864 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
1865 {
1866 	/* Check for grace-period ends and beginnings. */
1867 	note_gp_changes(rsp, rdp);
1868 
1869 	/*
1870 	 * Does this CPU still need to do its part for current grace period?
1871 	 * If no, return and let the other CPUs do their part as well.
1872 	 */
1873 	if (!rdp->qs_pending)
1874 		return;
1875 
1876 	/*
1877 	 * Was there a quiescent state since the beginning of the grace
1878 	 * period? If no, then exit and wait for the next call.
1879 	 */
1880 	if (!rdp->passed_quiesce)
1881 		return;
1882 
1883 	/*
1884 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
1885 	 * judge of that).
1886 	 */
1887 	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
1888 }
1889 
1890 #ifdef CONFIG_HOTPLUG_CPU
1891 
1892 /*
1893  * Send the specified CPU's RCU callbacks to the orphanage.  The
1894  * specified CPU must be offline, and the caller must hold the
1895  * ->orphan_lock.
1896  */
1897 static void
1898 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
1899 			  struct rcu_node *rnp, struct rcu_data *rdp)
1900 {
1901 	/* No-CBs CPUs do not have orphanable callbacks. */
1902 	if (rcu_is_nocb_cpu(rdp->cpu))
1903 		return;
1904 
1905 	/*
1906 	 * Orphan the callbacks.  First adjust the counts.  This is safe
1907 	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
1908 	 * cannot be running now.  Thus no memory barrier is required.
1909 	 */
1910 	if (rdp->nxtlist != NULL) {
1911 		rsp->qlen_lazy += rdp->qlen_lazy;
1912 		rsp->qlen += rdp->qlen;
1913 		rdp->n_cbs_orphaned += rdp->qlen;
1914 		rdp->qlen_lazy = 0;
1915 		ACCESS_ONCE(rdp->qlen) = 0;
1916 	}
1917 
1918 	/*
1919 	 * Next, move those callbacks still needing a grace period to
1920 	 * the orphanage, where some other CPU will pick them up.
1921 	 * Some of the callbacks might have gone partway through a grace
1922 	 * period, but that is too bad.  They get to start over because we
1923 	 * cannot assume that grace periods are synchronized across CPUs.
1924 	 * We don't bother updating the ->nxttail[] array yet, instead
1925 	 * we just reset the whole thing later on.
1926 	 */
1927 	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
1928 		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
1929 		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
1930 		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
1931 	}
1932 
1933 	/*
1934 	 * Then move the ready-to-invoke callbacks to the orphanage,
1935 	 * where some other CPU will pick them up.  These will not be
1936 	 * required to pass through another grace period: They are done.
1937 	 */
1938 	if (rdp->nxtlist != NULL) {
1939 		*rsp->orphan_donetail = rdp->nxtlist;
1940 		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
1941 	}
1942 
1943 	/* Finally, initialize the rcu_data structure's list to empty.  */
1944 	init_callback_list(rdp);
1945 }
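
/*
 * For reference, the ->nxtlist/->nxttail[] arrangement manipulated
 * above is a single linked list carved into segments by an array of
 * tail pointers (sketch, not to scale):
 *
 *	->nxtlist -> [done CBs][waiting CBs][next-ready CBs][next CBs]
 *	              ^RCU_DONE_TAIL ^RCU_WAIT_TAIL ...  ^RCU_NEXT_TAIL
 *
 * Each ->nxttail[i] points to the ->next pointer that terminates its
 * segment.  Callbacks before *->nxttail[RCU_DONE_TAIL] have had their
 * grace period and go to ->orphan_donelist; everything after it still
 * needs one and goes to ->orphan_nxtlist.
 */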
1946 
1947 /*
1948  * Adopt the RCU callbacks from the specified rcu_state structure's
1949  * orphanage.  The caller must hold the ->orphan_lock.
1950  */
1951 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
1952 {
1953 	int i;
1954 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
1955 
1956 	/* No-CBs CPUs are handled specially. */
1957 	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
1958 		return;
1959 
1960 	/* Do the accounting first. */
1961 	rdp->qlen_lazy += rsp->qlen_lazy;
1962 	rdp->qlen += rsp->qlen;
1963 	rdp->n_cbs_adopted += rsp->qlen;
1964 	if (rsp->qlen_lazy != rsp->qlen)
1965 		rcu_idle_count_callbacks_posted();
1966 	rsp->qlen_lazy = 0;
1967 	rsp->qlen = 0;
1968 
1969 	/*
1970 	 * We do not need a memory barrier here because the only way we
1971 	 * can get here if there is an rcu_barrier() in flight is if
1972 	 * we are the task doing the rcu_barrier().
1973 	 */
1974 
1975 	/* First adopt the ready-to-invoke callbacks. */
1976 	if (rsp->orphan_donelist != NULL) {
1977 		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
1978 		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
1979 		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
1980 			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
1981 				rdp->nxttail[i] = rsp->orphan_donetail;
1982 		rsp->orphan_donelist = NULL;
1983 		rsp->orphan_donetail = &rsp->orphan_donelist;
1984 	}
1985 
1986 	/* And then adopt the callbacks that still need a grace period. */
1987 	if (rsp->orphan_nxtlist != NULL) {
1988 		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
1989 		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
1990 		rsp->orphan_nxtlist = NULL;
1991 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
1992 	}
1993 }
1994 
1995 /*
1996  * Trace the fact that this CPU is going offline.
1997  */
1998 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1999 {
2000 	RCU_TRACE(unsigned long mask);
2001 	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2002 	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2003 
2004 	RCU_TRACE(mask = rdp->grpmask);
2005 	trace_rcu_grace_period(rsp->name,
2006 			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2007 			       TPS("cpuofl"));
2008 }
2009 
2010 /*
2011  * The CPU has been completely removed, and some other CPU is reporting
2012  * this fact from process context.  Do the remainder of the cleanup,
2013  * including orphaning the outgoing CPU's RCU callbacks, and also
2014  * adopting them.  There can only be one CPU hotplug operation at a time,
2015  * so no other CPU can be attempting to update rcu_cpu_kthread_task.
2016  */
2017 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2018 {
2019 	unsigned long flags;
2020 	unsigned long mask;
2021 	int need_report = 0;
2022 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2023 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2024 
2025 	/* Adjust any no-longer-needed kthreads. */
2026 	rcu_boost_kthread_setaffinity(rnp, -1);
2027 
2028 	/* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
2029 
2030 	/* Exclude any attempts to start a new grace period. */
2031 	mutex_lock(&rsp->onoff_mutex);
2032 	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2033 
2034 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2035 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2036 	rcu_adopt_orphan_cbs(rsp, flags);
2037 
2038 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
2039 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
2040 	do {
2041 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
2042 		smp_mb__after_unlock_lock();
2043 		rnp->qsmaskinit &= ~mask;
2044 		if (rnp->qsmaskinit != 0) {
2045 			if (rnp != rdp->mynode)
2046 				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2047 			break;
2048 		}
2049 		if (rnp == rdp->mynode)
2050 			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2051 		else
2052 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2053 		mask = rnp->grpmask;
2054 		rnp = rnp->parent;
2055 	} while (rnp != NULL);
2056 
2057 	/*
2058 	 * We still hold the leaf rcu_node structure lock here, and
2059 	 * irqs are still disabled.  The reason for this subterfuge is
2060 	 * that invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2061 	 * held leads to deadlock.
2062 	 */
2063 	raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2064 	rnp = rdp->mynode;
2065 	if (need_report & RCU_OFL_TASKS_NORM_GP)
2066 		rcu_report_unblock_qs_rnp(rnp, flags);
2067 	else
2068 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2069 	if (need_report & RCU_OFL_TASKS_EXP_GP)
2070 		rcu_report_exp_rnp(rsp, rnp, true);
2071 	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2072 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2073 		  cpu, rdp->qlen, rdp->nxtlist);
2074 	init_callback_list(rdp);
2075 	/* Disallow further callbacks on this CPU. */
2076 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2077 	mutex_unlock(&rsp->onoff_mutex);
2078 }
2079 
2080 #else /* #ifdef CONFIG_HOTPLUG_CPU */
2081 
2082 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2083 {
2084 }
2085 
2086 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2087 {
2088 }
2089 
2090 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
2091 
2092 /*
2093  * Invoke any RCU callbacks that have made it to the end of their grace
2094  * period.  Throttle as specified by rdp->blimit.
2095  */
2096 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2097 {
2098 	unsigned long flags;
2099 	struct rcu_head *next, *list, **tail;
2100 	long bl, count, count_lazy;
2101 	int i;
2102 
2103 	/* If no callbacks are ready, just return. */
2104 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
2105 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
2106 		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
2107 				    need_resched(), is_idle_task(current),
2108 				    rcu_is_callbacks_kthread());
2109 		return;
2110 	}
2111 
2112 	/*
2113 	 * Extract the list of ready callbacks, disabling to prevent
2114 	 * races with call_rcu() from interrupt handlers.
2115 	 */
2116 	local_irq_save(flags);
2117 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2118 	bl = rdp->blimit;
2119 	trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
2120 	list = rdp->nxtlist;
2121 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2122 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2123 	tail = rdp->nxttail[RCU_DONE_TAIL];
2124 	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2125 		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2126 			rdp->nxttail[i] = &rdp->nxtlist;
2127 	local_irq_restore(flags);
2128 
2129 	/* Invoke callbacks. */
2130 	count = count_lazy = 0;
2131 	while (list) {
2132 		next = list->next;
2133 		prefetch(next);
2134 		debug_rcu_head_unqueue(list);
2135 		if (__rcu_reclaim(rsp->name, list))
2136 			count_lazy++;
2137 		list = next;
2138 		/* Stop only if limit reached and CPU has something to do. */
2139 		if (++count >= bl &&
2140 		    (need_resched() ||
2141 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2142 			break;
2143 	}
2144 
2145 	local_irq_save(flags);
2146 	trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2147 			    is_idle_task(current),
2148 			    rcu_is_callbacks_kthread());
2149 
2150 	/* Update count, and requeue any remaining callbacks. */
2151 	if (list != NULL) {
2152 		*tail = rdp->nxtlist;
2153 		rdp->nxtlist = list;
2154 		for (i = 0; i < RCU_NEXT_SIZE; i++)
2155 			if (&rdp->nxtlist == rdp->nxttail[i])
2156 				rdp->nxttail[i] = tail;
2157 			else
2158 				break;
2159 	}
2160 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2161 	rdp->qlen_lazy -= count_lazy;
2162 	ACCESS_ONCE(rdp->qlen) -= count;
2163 	rdp->n_cbs_invoked += count;
2164 
2165 	/* Reinstate batch limit if we have worked down the excess. */
2166 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2167 		rdp->blimit = blimit;
2168 
2169 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2170 	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2171 		rdp->qlen_last_fqs_check = 0;
2172 		rdp->n_force_qs_snap = rsp->n_force_qs;
2173 	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2174 		rdp->qlen_last_fqs_check = rdp->qlen;
2175 	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
2176 
2177 	local_irq_restore(flags);
2178 
2179 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2180 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2181 		invoke_rcu_core();
2182 }
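
/*
 * Throttling example for the above: with the default blimit of 10, a
 * CPU holding 10,000 ready callbacks invokes at most 10 per pass,
 * unless it is idle or running the RCU callbacks kthread.  When
 * __call_rcu_core() has raised ->blimit to LONG_MAX to work off a
 * callback flood, the check above restores the normal limit once
 * ->qlen has drained to qlowmark or below.
 */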
2183 
2184 /*
2185  * Check to see if this CPU is in a non-context-switch quiescent state
2186  * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2187  * Also schedule RCU core processing.
2188  *
2189  * This function must be called from hardirq context.  It is normally
2190  * invoked from the scheduling-clock interrupt.  If rcu_pending returns
2191  * false, there is no point in invoking rcu_check_callbacks().
2192  */
2193 void rcu_check_callbacks(int cpu, int user)
2194 {
2195 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2196 	increment_cpu_stall_ticks();
2197 	if (user || rcu_is_cpu_rrupt_from_idle()) {
2198 
2199 		/*
2200 		 * Get here if this CPU took its interrupt from user
2201 		 * mode or from the idle loop, and if this is not a
2202 		 * nested interrupt.  In this case, the CPU is in
2203 		 * a quiescent state, so note it.
2204 		 *
2205 		 * No memory barrier is required here because both
2206 		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2207 		 * variables that other CPUs neither access nor modify,
2208 		 * at least not while the corresponding CPU is online.
2209 		 */
2210 
2211 		rcu_sched_qs(cpu);
2212 		rcu_bh_qs(cpu);
2213 
2214 	} else if (!in_softirq()) {
2215 
2216 		/*
2217 		 * Get here if this CPU did not take its interrupt from
2218 		 * softirq, in other words, if it is not interrupting
2219 		 * an rcu_bh read-side critical section.  This is an _bh
2220 		 * quiescent state, so note it.
2221 		 */
2222 
2223 		rcu_bh_qs(cpu);
2224 	}
2225 	rcu_preempt_check_callbacks(cpu);
2226 	if (rcu_pending(cpu))
2227 		invoke_rcu_core();
2228 	trace_rcu_utilization(TPS("End scheduler-tick"));
2229 }
2230 
2231 /*
2232  * Scan the leaf rcu_node structures, processing dyntick state for any that
2233  * have not yet encountered a quiescent state, using the function specified.
2234  * Also initiate boosting for any threads blocked on the root rcu_node.
2235  *
2236  * The caller must have suppressed start of new grace periods.
2237  */
2238 static void force_qs_rnp(struct rcu_state *rsp,
2239 			 int (*f)(struct rcu_data *rsp, bool *isidle,
2240 				  unsigned long *maxj),
2241 			 bool *isidle, unsigned long *maxj)
2242 {
2243 	unsigned long bit;
2244 	int cpu;
2245 	unsigned long flags;
2246 	unsigned long mask;
2247 	struct rcu_node *rnp;
2248 
2249 	rcu_for_each_leaf_node(rsp, rnp) {
2250 		cond_resched();
2251 		mask = 0;
2252 		raw_spin_lock_irqsave(&rnp->lock, flags);
2253 		smp_mb__after_unlock_lock();
2254 		if (!rcu_gp_in_progress(rsp)) {
2255 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2256 			return;
2257 		}
2258 		if (rnp->qsmask == 0) {
2259 			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
2260 			continue;
2261 		}
2262 		cpu = rnp->grplo;
2263 		bit = 1;
2264 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2265 			if ((rnp->qsmask & bit) != 0) {
2266 				if ((rnp->qsmaskinit & bit) != 0)
2267 					*isidle = 0;
2268 				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2269 					mask |= bit;
2270 			}
2271 		}
2272 		if (mask != 0) {
2273 
2274 			/* rcu_report_qs_rnp() releases rnp->lock. */
2275 			rcu_report_qs_rnp(mask, rsp, rnp, flags);
2276 			continue;
2277 		}
2278 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2279 	}
2280 	rnp = rcu_get_root(rsp);
2281 	if (rnp->qsmask == 0) {
2282 		raw_spin_lock_irqsave(&rnp->lock, flags);
2283 		smp_mb__after_unlock_lock();
2284 		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2285 	}
2286 }
2287 
2288 /*
2289  * Force quiescent states on reluctant CPUs, and also detect which
2290  * CPUs are in dyntick-idle mode.
2291  */
2292 static void force_quiescent_state(struct rcu_state *rsp)
2293 {
2294 	unsigned long flags;
2295 	bool ret;
2296 	struct rcu_node *rnp;
2297 	struct rcu_node *rnp_old = NULL;
2298 
2299 	/* Funnel through hierarchy to reduce memory contention. */
2300 	rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
2301 	for (; rnp != NULL; rnp = rnp->parent) {
2302 		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2303 		      !raw_spin_trylock(&rnp->fqslock);
2304 		if (rnp_old != NULL)
2305 			raw_spin_unlock(&rnp_old->fqslock);
2306 		if (ret) {
2307 			ACCESS_ONCE(rsp->n_force_qs_lh)++;
2308 			return;
2309 		}
2310 		rnp_old = rnp;
2311 	}
2312 	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2313 
2314 	/* Reached the root of the rcu_node tree, acquire lock. */
2315 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
2316 	smp_mb__after_unlock_lock();
2317 	raw_spin_unlock(&rnp_old->fqslock);
2318 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2319 		ACCESS_ONCE(rsp->n_force_qs_lh)++;
2320 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2321 		return;  /* Someone beat us to it. */
2322 	}
2323 	rsp->gp_flags |= RCU_GP_FLAG_FQS;
2324 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2325 	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
2326 }
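
/*
 * The funnel above generalizes to any tree of trylocks: a task races
 * upward and abandons the attempt the moment it loses a trylock,
 * because whoever beat it will do the work.  Minimal two-level sketch
 * (hypothetical, error handling elided):
 *
 *	if (!raw_spin_trylock(&leaf->fqslock))
 *		return;			(a racing task will force QSes)
 *	if (!raw_spin_trylock(&root->fqslock)) {
 *		raw_spin_unlock(&leaf->fqslock);
 *		return;			(lost the race at the root)
 *	}
 *	raw_spin_unlock(&leaf->fqslock);
 *	(... set RCU_GP_FLAG_FQS under the root rcu_node ->lock ...)
 */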
2327 
2328 /*
2329  * This does the RCU core processing work for the specified rcu_state
2330  * and rcu_data structures.  This may be called only from the CPU to
2331  * whom the rdp belongs.
2332  */
2333 static void
2334 __rcu_process_callbacks(struct rcu_state *rsp)
2335 {
2336 	unsigned long flags;
2337 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2338 
2339 	WARN_ON_ONCE(rdp->beenonline == 0);
2340 
2341 	/* Update RCU state based on any recent quiescent states. */
2342 	rcu_check_quiescent_state(rsp, rdp);
2343 
2344 	/* Does this CPU require a not-yet-started grace period? */
2345 	local_irq_save(flags);
2346 	if (cpu_needs_another_gp(rsp, rdp)) {
2347 		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2348 		rcu_start_gp(rsp);
2349 		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2350 	} else {
2351 		local_irq_restore(flags);
2352 	}
2353 
2354 	/* If there are callbacks ready, invoke them. */
2355 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2356 		invoke_rcu_callbacks(rsp, rdp);
2357 
2358 	/* Do any needed deferred wakeups of rcuo kthreads. */
2359 	do_nocb_deferred_wakeup(rdp);
2360 }
2361 
2362 /*
2363  * Do RCU core processing for the current CPU.
2364  */
2365 static void rcu_process_callbacks(struct softirq_action *unused)
2366 {
2367 	struct rcu_state *rsp;
2368 
2369 	if (cpu_is_offline(smp_processor_id()))
2370 		return;
2371 	trace_rcu_utilization(TPS("Start RCU core"));
2372 	for_each_rcu_flavor(rsp)
2373 		__rcu_process_callbacks(rsp);
2374 	trace_rcu_utilization(TPS("End RCU core"));
2375 }
2376 
2377 /*
2378  * Schedule RCU callback invocation.  If the specified type of RCU
2379  * does not support RCU priority boosting, just do a direct call,
2380  * otherwise wake up the per-CPU kernel kthread.  Note that because we
2381  * are running on the current CPU with interrupts disabled, the
2382  * rcu_cpu_kthread_task cannot disappear out from under us.
2383  */
2384 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2385 {
2386 	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
2387 		return;
2388 	if (likely(!rsp->boost)) {
2389 		rcu_do_batch(rsp, rdp);
2390 		return;
2391 	}
2392 	invoke_rcu_callbacks_kthread();
2393 }
2394 
2395 static void invoke_rcu_core(void)
2396 {
2397 	if (cpu_online(smp_processor_id()))
2398 		raise_softirq(RCU_SOFTIRQ);
2399 }
2400 
2401 /*
2402  * Handle any core-RCU processing required by a call_rcu() invocation.
2403  */
2404 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2405 			    struct rcu_head *head, unsigned long flags)
2406 {
2407 	/*
2408 	 * If called from an extended quiescent state, invoke the RCU
2409 	 * core in order to force a re-evaluation of RCU's idleness.
2410 	 */
2411 	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
2412 		invoke_rcu_core();
2413 
2414 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2415 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2416 		return;
2417 
2418 	/*
2419 	 * Force the grace period if too many callbacks or too long waiting.
2420 	 * Enforce hysteresis, and don't invoke force_quiescent_state()
2421 	 * if some other CPU has recently done so.  Also, don't bother
2422 	 * invoking force_quiescent_state() if the newly enqueued callback
2423 	 * is the only one waiting for a grace period to complete.
2424 	 */
2425 	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
2426 
2427 		/* Are we ignoring a completed grace period? */
2428 		note_gp_changes(rsp, rdp);
2429 
2430 		/* Start a new grace period if one not already started. */
2431 		if (!rcu_gp_in_progress(rsp)) {
2432 			struct rcu_node *rnp_root = rcu_get_root(rsp);
2433 
2434 			raw_spin_lock(&rnp_root->lock);
2435 			smp_mb__after_unlock_lock();
2436 			rcu_start_gp(rsp);
2437 			raw_spin_unlock(&rnp_root->lock);
2438 		} else {
2439 			/* Give the grace period a kick. */
2440 			rdp->blimit = LONG_MAX;
2441 			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2442 			    *rdp->nxttail[RCU_DONE_TAIL] != head)
2443 				force_quiescent_state(rsp);
2444 			rdp->n_force_qs_snap = rsp->n_force_qs;
2445 			rdp->qlen_last_fqs_check = rdp->qlen;
2446 		}
2447 	}
2448 }
2449 
2450 /*
2451  * RCU callback function to leak a callback.
2452  */
2453 static void rcu_leak_callback(struct rcu_head *rhp)
2454 {
2455 }
2456 
2457 /*
2458  * Helper function for call_rcu() and friends.  The cpu argument will
2459  * normally be -1, indicating "currently running CPU".  It may specify
2460  * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
2461  * is expected to specify a CPU.
2462  */
2463 static void
2464 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
2465 	   struct rcu_state *rsp, int cpu, bool lazy)
2466 {
2467 	unsigned long flags;
2468 	struct rcu_data *rdp;
2469 
2470 	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
2471 	if (debug_rcu_head_queue(head)) {
2472 		/* Probable double call_rcu(), so leak the callback. */
2473 		ACCESS_ONCE(head->func) = rcu_leak_callback;
2474 		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
2475 		return;
2476 	}
2477 	head->func = func;
2478 	head->next = NULL;
2479 
2480 	/*
2481 	 * Opportunistically note grace-period endings and beginnings.
2482 	 * Note that we might see a beginning right after we see an
2483 	 * end, but never vice versa, since this CPU has to pass through
2484 	 * a quiescent state betweentimes.
2485 	 */
2486 	local_irq_save(flags);
2487 	rdp = this_cpu_ptr(rsp->rda);
2488 
2489 	/* Add the callback to our list. */
2490 	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
2491 		int offline;
2492 
2493 		if (cpu != -1)
2494 			rdp = per_cpu_ptr(rsp->rda, cpu);
2495 		offline = !__call_rcu_nocb(rdp, head, lazy, flags);
2496 		WARN_ON_ONCE(offline);
2497 		/* __call_rcu() is illegal on offline CPU; leak the callback. */
2498 		local_irq_restore(flags);
2499 		return;
2500 	}
2501 	ACCESS_ONCE(rdp->qlen)++;
2502 	if (lazy)
2503 		rdp->qlen_lazy++;
2504 	else
2505 		rcu_idle_count_callbacks_posted();
2506 	smp_mb();  /* Count before adding callback for rcu_barrier(). */
2507 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
2508 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2509 
2510 	if (__is_kfree_rcu_offset((unsigned long)func))
2511 		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2512 					 rdp->qlen_lazy, rdp->qlen);
2513 	else
2514 		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
2515 
2516 	/* Go handle any RCU core processing required. */
2517 	__call_rcu_core(rsp, rdp, head, flags);
2518 	local_irq_restore(flags);
2519 }
2520 
2521 /*
2522  * Queue an RCU-sched callback for invocation after a grace period.
2523  */
2524 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2525 {
2526 	__call_rcu(head, func, &rcu_sched_state, -1, 0);
2527 }
2528 EXPORT_SYMBOL_GPL(call_rcu_sched);
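
/*
 * Typical call_rcu_sched() usage sketch (hypothetical structure and
 * callback, shown for illustration only):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 * Given a struct foo *fp just unlinked from its RCU-protected
 * structure, the updater posts the callback and returns immediately:
 *
 *	call_rcu_sched(&fp->rh, free_foo_cb);
 */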
2529 
2530 /*
2531  * Queue an RCU callback for invocation after a quicker grace period.
2532  */
2533 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2534 {
2535 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
2536 }
2537 EXPORT_SYMBOL_GPL(call_rcu_bh);
2538 
2539 /*
2540  * Because a context switch is a grace period for RCU-sched and RCU-bh,
2541  * any blocking grace-period wait automatically implies a grace period
2542  * if there is only one CPU online at any point in time during execution
2543  * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
2544  * occasionally incorrectly indicate that there are multiple CPUs online
2545  * when there was in fact only one the whole time, as this just adds
2546  * some overhead: RCU still operates correctly.
2547  */
2548 static inline int rcu_blocking_is_gp(void)
2549 {
2550 	int ret;
2551 
2552 	might_sleep();  /* Check for RCU read-side critical section. */
2553 	preempt_disable();
2554 	ret = num_online_cpus() <= 1;
2555 	preempt_enable();
2556 	return ret;
2557 }
2558 
2559 /**
2560  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
2561  *
2562  * Control will return to the caller some time after a full rcu-sched
2563  * grace period has elapsed, in other words after all currently executing
2564  * rcu-sched read-side critical sections have completed.   These read-side
2565  * critical sections are delimited by rcu_read_lock_sched() and
2566  * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
2567  * local_irq_disable(), and so on may be used in place of
2568  * rcu_read_lock_sched().
2569  *
2570  * This means that all preempt_disable code sequences, including NMI and
2571  * non-threaded hardware-interrupt handlers, in progress on entry will
2572  * have completed before this primitive returns.  However, this does not
2573  * guarantee that softirq handlers will have completed, since in some
2574  * kernels, these handlers can run in process context, and can block.
2575  *
2576  * Note that this guarantee implies further memory-ordering guarantees.
2577  * On systems with more than one CPU, when synchronize_sched() returns,
2578  * each CPU is guaranteed to have executed a full memory barrier since the
2579  * end of its last RCU-sched read-side critical section whose beginning
2580  * preceded the call to synchronize_sched().  In addition, each CPU having
2581  * an RCU read-side critical section that extends beyond the return from
2582  * synchronize_sched() is guaranteed to have executed a full memory barrier
2583  * after the beginning of synchronize_sched() and before the beginning of
2584  * that RCU read-side critical section.  Note that these guarantees include
2585  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2586  * that are executing in the kernel.
2587  *
2588  * Furthermore, if CPU A invoked synchronize_sched(), which returned
2589  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2590  * to have executed a full memory barrier during the execution of
2591  * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
2592  * again only if the system has more than one CPU).
2593  *
2594  * This primitive provides the guarantees made by the (now removed)
2595  * synchronize_kernel() API.  In contrast, synchronize_rcu() only
2596  * guarantees that rcu_read_lock() sections will have completed.
2597  * In "classic RCU", these two guarantees happen to be one and
2598  * the same, but can differ in realtime RCU implementations.
2599  */
2600 void synchronize_sched(void)
2601 {
2602 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2603 			   !lock_is_held(&rcu_lock_map) &&
2604 			   !lock_is_held(&rcu_sched_lock_map),
2605 			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
2606 	if (rcu_blocking_is_gp())
2607 		return;
2608 	if (rcu_expedited)
2609 		synchronize_sched_expedited();
2610 	else
2611 		wait_rcu_gp(call_rcu_sched);
2612 }
2613 EXPORT_SYMBOL_GPL(synchronize_sched);
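
/*
 * Classic updater sketch using synchronize_sched() (hypothetical
 * names; readers are assumed to traverse the list under
 * preempt_disable() rather than rcu_read_lock()):
 *
 *	list_del_rcu(&p->list);		(unpublish the element)
 *	synchronize_sched();		(wait out pre-existing readers)
 *	kfree(p);			(no reader can still see p)
 */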
2614 
2615 /**
2616  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
2617  *
2618  * Control will return to the caller some time after a full rcu_bh grace
2619  * period has elapsed, in other words after all currently executing rcu_bh
2620  * read-side critical sections have completed.  RCU read-side critical
2621  * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
2622  * and may be nested.
2623  *
2624  * See the description of synchronize_sched() for more detailed information
2625  * on memory ordering guarantees.
2626  */
2627 void synchronize_rcu_bh(void)
2628 {
2629 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2630 			   !lock_is_held(&rcu_lock_map) &&
2631 			   !lock_is_held(&rcu_sched_lock_map),
2632 			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
2633 	if (rcu_blocking_is_gp())
2634 		return;
2635 	if (rcu_expedited)
2636 		synchronize_rcu_bh_expedited();
2637 	else
2638 		wait_rcu_gp(call_rcu_bh);
2639 }
2640 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
2641 
2642 /**
2643  * get_state_synchronize_rcu - Snapshot current RCU state
2644  *
2645  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
2646  * to determine whether or not a full grace period has elapsed in the
2647  * meantime.
2648  */
2649 unsigned long get_state_synchronize_rcu(void)
2650 {
2651 	/*
2652 	 * Any prior manipulation of RCU-protected data must happen
2653 	 * before the load from ->gpnum.
2654 	 */
2655 	smp_mb();  /* ^^^ */
2656 
2657 	/*
2658 	 * Make sure this load happens before the purportedly
2659 	 * time-consuming work between get_state_synchronize_rcu()
2660 	 * and cond_synchronize_rcu().
2661 	 */
2662 	return smp_load_acquire(&rcu_state->gpnum);
2663 }
2664 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
2665 
2666 /**
2667  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
2668  *
2669  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
2670  *
2671  * If a full RCU grace period has elapsed since the earlier call to
2672  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
2673  * synchronize_rcu() to wait for a full grace period.
2674  *
2675  * Yes, this function does not take counter wrap into account.  But
2676  * counter wrap is harmless.  If the counter wraps, we have waited for
2677  * more than 2 billion grace periods (and way more on a 64-bit system!),
2678  * so waiting for one additional grace period should be just fine.
2679  */
2680 void cond_synchronize_rcu(unsigned long oldstate)
2681 {
2682 	unsigned long newstate;
2683 
2684 	/*
2685 	 * Ensure that this load happens before any RCU-destructive
2686 	 * actions the caller might carry out after we return.
2687 	 */
2688 	newstate = smp_load_acquire(&rcu_state->completed);
2689 	if (ULONG_CMP_GE(oldstate, newstate))
2690 		synchronize_rcu();
2691 }
2692 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
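
/*
 * The two functions above pair as follows (illustrative sketch with a
 * hypothetical slow operation between them):
 *
 *	oldstate = get_state_synchronize_rcu();
 *	do_something_slow();		(hopefully spans a grace period)
 *	cond_synchronize_rcu(oldstate);	(blocks only if it did not)
 */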
2693 
2694 static int synchronize_sched_expedited_cpu_stop(void *data)
2695 {
2696 	/*
2697 	 * There must be a full memory barrier on each affected CPU
2698 	 * between the time that try_stop_cpus() is called and the
2699 	 * time that it returns.
2700 	 *
2701 	 * In the current initial implementation of cpu_stop, the
2702 	 * above condition is already met when control reaches
2703 	 * this point and the following smp_mb() is not strictly
2704 	 * necessary.  Do smp_mb() anyway for documentation and
2705 	 * robustness against future implementation changes.
2706 	 */
2707 	smp_mb(); /* See above comment block. */
2708 	return 0;
2709 }
2710 
2711 /**
2712  * synchronize_sched_expedited - Brute-force RCU-sched grace period
2713  *
2714  * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
2715  * approach to force the grace period to end quickly.  This consumes
2716  * significant time on all CPUs and is unfriendly to real-time workloads,
2717  * so is thus not recommended for any sort of common-case code.  In fact,
2718  * if you are using synchronize_sched_expedited() in a loop, please
2719  * restructure your code to batch your updates, and then use a single
2720  * synchronize_sched() instead.
2721  *
2722  * Note that it is illegal to call this function while holding any lock
2723  * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
2724  * to call this function from a CPU-hotplug notifier.  Failing to observe
2725  * these restrictions will result in deadlock.
2726  *
2727  * This implementation can be thought of as an application of ticket
2728  * locking to RCU, with sync_sched_expedited_started and
2729  * sync_sched_expedited_done taking on the roles of the halves
2730  * of the ticket-lock word.  Each task atomically increments
2731  * sync_sched_expedited_started upon entry, snapshotting the old value,
2732  * then attempts to stop all the CPUs.  If this succeeds, then each
2733  * CPU will have executed a context switch, resulting in an RCU-sched
2734  * grace period.  We are then done, so we use atomic_cmpxchg() to
2735  * update sync_sched_expedited_done to match our snapshot -- but
2736  * only if someone else has not already advanced past our snapshot.
2737  *
2738  * On the other hand, if try_stop_cpus() fails, we check the value
2739  * of sync_sched_expedited_done.  If it has advanced past our
2740  * initial snapshot, then someone else must have forced a grace period
2741  * some time after we took our snapshot.  In this case, our work is
2742  * done for us, and we can simply return.  Otherwise, we try again,
2743  * but keep our initial snapshot for purposes of checking for someone
2744  * doing our work for us.
2745  *
2746  * If we fail too many times in a row, we fall back to synchronize_sched().
2747  */
2748 void synchronize_sched_expedited(void)
2749 {
2750 	long firstsnap, s, snap;
2751 	int trycount = 0;
2752 	struct rcu_state *rsp = &rcu_sched_state;
2753 
2754 	/*
2755 	 * If we are in danger of counter wrap, just do synchronize_sched().
2756 	 * By allowing sync_sched_expedited_started to advance no more than
2757 	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
2758 	 * that more than 3.5 billion CPUs would be required to force a
2759 	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
2760 	 * course be required on a 64-bit system.
2761 	 */
2762 	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
2763 			 (ulong)atomic_long_read(&rsp->expedited_done) +
2764 			 ULONG_MAX / 8)) {
2765 		synchronize_sched();
2766 		atomic_long_inc(&rsp->expedited_wrap);
2767 		return;
2768 	}
2769 
2770 	/*
2771 	 * Take a ticket.  Note that atomic_inc_return() implies a
2772 	 * full memory barrier.
2773 	 */
2774 	snap = atomic_long_inc_return(&rsp->expedited_start);
2775 	firstsnap = snap;
2776 	get_online_cpus();
2777 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
2778 
2779 	/*
2780 	 * Each pass through the following loop attempts to force a
2781 	 * context switch on each CPU.
2782 	 */
2783 	while (try_stop_cpus(cpu_online_mask,
2784 			     synchronize_sched_expedited_cpu_stop,
2785 			     NULL) == -EAGAIN) {
2786 		put_online_cpus();
2787 		atomic_long_inc(&rsp->expedited_tryfail);
2788 
2789 		/* Check to see if someone else did our work for us. */
2790 		s = atomic_long_read(&rsp->expedited_done);
2791 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2792 			/* ensure test happens before caller kfree */
2793 			smp_mb__before_atomic_inc(); /* ^^^ */
2794 			atomic_long_inc(&rsp->expedited_workdone1);
2795 			return;
2796 		}
2797 
2798 		/* No joy, try again later.  Or just synchronize_sched(). */
2799 		if (trycount++ < 10) {
2800 			udelay(trycount * num_online_cpus());
2801 		} else {
2802 			wait_rcu_gp(call_rcu_sched);
2803 			atomic_long_inc(&rsp->expedited_normal);
2804 			return;
2805 		}
2806 
2807 		/* Recheck to see if someone else did our work for us. */
2808 		s = atomic_long_read(&rsp->expedited_done);
2809 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2810 			/* ensure test happens before caller kfree */
2811 			smp_mb__before_atomic_inc(); /* ^^^ */
2812 			atomic_long_inc(&rsp->expedited_workdone2);
2813 			return;
2814 		}
2815 
2816 		/*
2817 		 * Refetching sync_sched_expedited_started allows later
2818 		 * callers to piggyback on our grace period.  We retry
2819 		 * after they started, so our grace period works for them,
2820 		 * and they started after our first try, so their grace
2821 		 * period works for us.
2822 		 */
2823 		get_online_cpus();
2824 		snap = atomic_long_read(&rsp->expedited_start);
2825 		smp_mb(); /* ensure read is before try_stop_cpus(). */
2826 	}
2827 	atomic_long_inc(&rsp->expedited_stoppedcpus);
2828 
2829 	/*
2830 	 * Everyone up to our most recent fetch is covered by our grace
2831 	 * period.  Update the counter, but only if our work is still
2832 	 * relevant -- which it won't be if someone who started later
2833 	 * than we did already did their update.
2834 	 */
2835 	do {
2836 		atomic_long_inc(&rsp->expedited_done_tries);
2837 		s = atomic_long_read(&rsp->expedited_done);
2838 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
2839 			/* ensure test happens before caller kfree */
2840 			smp_mb__before_atomic_inc(); /* ^^^ */
2841 			atomic_long_inc(&rsp->expedited_done_lost);
2842 			break;
2843 		}
2844 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
2845 	atomic_long_inc(&rsp->expedited_done_exit);
2846 
2847 	put_online_cpus();
2848 }
2849 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
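
/*
 * The ticket analogy above, in miniature (hypothetical simplification
 * ignoring counter wrap, hotplug exclusion, and the retry limit):
 *
 *	snap = atomic_long_inc_return(&start);	(take a ticket)
 *	while (try_stop_cpus(...) == -EAGAIN)
 *		if (atomic_long_read(&done) >= snap)
 *			return;			(someone did our work)
 *	atomic_long_cmpxchg(&done, old, snap);	(publish, if still news)
 */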
2850 
2851 /*
2852  * Check to see if there is any immediate RCU-related work to be done
2853  * by the current CPU, for the specified type of RCU, returning 1 if so.
2854  * The checks are in order of increasing expense: checks that can be
2855  * carried out against CPU-local state are performed first.  However,
2856  * we must check for CPU stalls first, else we might not get a chance.
2857  */
2858 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
2859 {
2860 	struct rcu_node *rnp = rdp->mynode;
2861 
2862 	rdp->n_rcu_pending++;
2863 
2864 	/* Check for CPU stalls, if enabled. */
2865 	check_cpu_stall(rsp, rdp);
2866 
2867 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
2868 	if (rcu_nohz_full_cpu(rsp))
2869 		return 0;
2870 
2871 	/* Is the RCU core waiting for a quiescent state from this CPU? */
2872 	if (rcu_scheduler_fully_active &&
2873 	    rdp->qs_pending && !rdp->passed_quiesce) {
2874 		rdp->n_rp_qs_pending++;
2875 	} else if (rdp->qs_pending && rdp->passed_quiesce) {
2876 		rdp->n_rp_report_qs++;
2877 		return 1;
2878 	}
2879 
2880 	/* Does this CPU have callbacks ready to invoke? */
2881 	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
2882 		rdp->n_rp_cb_ready++;
2883 		return 1;
2884 	}
2885 
2886 	/* Has RCU gone idle with this CPU needing another grace period? */
2887 	if (cpu_needs_another_gp(rsp, rdp)) {
2888 		rdp->n_rp_cpu_needs_gp++;
2889 		return 1;
2890 	}
2891 
2892 	/* Has another RCU grace period completed?  */
2893 	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
2894 		rdp->n_rp_gp_completed++;
2895 		return 1;
2896 	}
2897 
2898 	/* Has a new RCU grace period started? */
2899 	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
2900 		rdp->n_rp_gp_started++;
2901 		return 1;
2902 	}
2903 
2904 	/* Does this CPU need a deferred NOCB wakeup? */
2905 	if (rcu_nocb_need_deferred_wakeup(rdp)) {
2906 		rdp->n_rp_nocb_defer_wakeup++;
2907 		return 1;
2908 	}
2909 
2910 	/* nothing to do */
2911 	rdp->n_rp_need_nothing++;
2912 	return 0;
2913 }
2914 
2915 /*
2916  * Check to see if there is any immediate RCU-related work to be done
2917  * by the current CPU, returning 1 if so.  This function is part of the
2918  * RCU implementation; it is -not- an exported member of the RCU API.
2919  */
2920 static int rcu_pending(int cpu)
2921 {
2922 	struct rcu_state *rsp;
2923 
2924 	for_each_rcu_flavor(rsp)
2925 		if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
2926 			return 1;
2927 	return 0;
2928 }
2929 
2930 /*
2931  * Return true if the specified CPU has any callback.  If all_lazy is
2932  * non-NULL, store an indication of whether all callbacks are lazy.
2933  * (If there are no callbacks, all of them are deemed to be lazy.)
2934  */
2935 static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
2936 {
2937 	bool al = true;
2938 	bool hc = false;
2939 	struct rcu_data *rdp;
2940 	struct rcu_state *rsp;
2941 
2942 	for_each_rcu_flavor(rsp) {
2943 		rdp = per_cpu_ptr(rsp->rda, cpu);
2944 		if (!rdp->nxtlist)
2945 			continue;
2946 		hc = true;
2947 		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
2948 			al = false;
2949 			break;
2950 		}
2951 	}
2952 	if (all_lazy)
2953 		*all_lazy = al;
2954 	return hc;
2955 }
2956 
2957 /*
2958  * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
2959  * the compiler is expected to optimize this away.
2960  */
2961 static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
2962 			       int cpu, unsigned long done)
2963 {
2964 	trace_rcu_barrier(rsp->name, s, cpu,
2965 			  atomic_read(&rsp->barrier_cpu_count), done);
2966 }
2967 
2968 /*
2969  * RCU callback function for _rcu_barrier().  If we are last, wake
2970  * up the task executing _rcu_barrier().
2971  */
2972 static void rcu_barrier_callback(struct rcu_head *rhp)
2973 {
2974 	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
2975 	struct rcu_state *rsp = rdp->rsp;
2976 
2977 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
2978 		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
2979 		complete(&rsp->barrier_completion);
2980 	} else {
2981 		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
2982 	}
2983 }
2984 
2985 /*
2986  * Called with preemption disabled, and from cross-cpu IRQ context.
2987  */
2988 static void rcu_barrier_func(void *type)
2989 {
2990 	struct rcu_state *rsp = type;
2991 	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2992 
2993 	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
2994 	atomic_inc(&rsp->barrier_cpu_count);
2995 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
2996 }
2997 
2998 /*
2999  * Orchestrate the specified type of RCU barrier, waiting for all
3000  * RCU callbacks of the specified type to complete.
3001  */
3002 static void _rcu_barrier(struct rcu_state *rsp)
3003 {
3004 	int cpu;
3005 	struct rcu_data *rdp;
3006 	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
3007 	unsigned long snap_done;
3008 
3009 	_rcu_barrier_trace(rsp, "Begin", -1, snap);
3010 
3011 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3012 	mutex_lock(&rsp->barrier_mutex);
3013 
3014 	/*
3015 	 * Ensure that all prior references, including to ->n_barrier_done,
3016 	 * are ordered before the _rcu_barrier() machinery.
3017 	 */
3018 	smp_mb();  /* See above block comment. */
3019 
3020 	/*
3021 	 * Recheck ->n_barrier_done to see if others did our work for us.
3022 	 * This means checking ->n_barrier_done for an even-to-odd-to-even
3023 	 * transition.  The "if" expression below therefore rounds the old
3024 	 * value up to the next even number and adds two before comparing.
3025 	 */
3026 	snap_done = rsp->n_barrier_done;
3027 	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
3028 
3029 	/*
3030 	 * If the value in snap is odd, we needed to wait for the current
3031 	 * rcu_barrier() to complete, then wait for the next one, in other
3032 	 * words, we need the value of snap_done to be three larger than
3033 	 * the value of snap.  On the other hand, if the value in snap is
3034 	 * even, we only had to wait for the next rcu_barrier() to complete,
3035 	 * in other words, we need the value of snap_done to be only two
3036 	 * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
3037 	 * this for us (thank you, Linus!).
3038 	 */
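	/*
	 * Worked example: if snap was 4 (even), (4 + 3) & ~0x1 == 6, so
	 * one further even-to-odd-to-even transition (to 6) suffices.
	 * If snap was 5 (odd), (5 + 3) & ~0x1 == 8: the in-flight
	 * barrier must finish (6) and a complete new one run (8).
	 */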
3039 	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
3040 		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
3041 		smp_mb(); /* caller's subsequent code after above check. */
3042 		mutex_unlock(&rsp->barrier_mutex);
3043 		return;
3044 	}
3045 
3046 	/*
3047 	 * Increment ->n_barrier_done to avoid duplicate work.  Use
3048 	 * ACCESS_ONCE() to prevent the compiler from speculating
3049 	 * the increment to precede the early-exit check.
3050 	 */
3051 	ACCESS_ONCE(rsp->n_barrier_done)++;
3052 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
3053 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
3054 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
3055 
3056 	/*
3057 	 * Initialize the count to one rather than to zero in order to
3058 	 * avoid a too-soon return to zero in case of a short grace period
3059 	 * (or preemption of this task).  Exclude CPU-hotplug operations
3060 	 * to ensure that no offline CPU has callbacks queued.
3061 	 */
3062 	init_completion(&rsp->barrier_completion);
3063 	atomic_set(&rsp->barrier_cpu_count, 1);
3064 	get_online_cpus();
3065 
3066 	/*
3067 	 * Force each CPU with callbacks to register a new callback.
3068 	 * When that callback is invoked, we will know that all of the
3069 	 * corresponding CPU's preceding callbacks have been invoked.
3070 	 */
3071 	for_each_possible_cpu(cpu) {
3072 		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3073 			continue;
3074 		rdp = per_cpu_ptr(rsp->rda, cpu);
3075 		if (rcu_is_nocb_cpu(cpu)) {
3076 			_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3077 					   rsp->n_barrier_done);
3078 			atomic_inc(&rsp->barrier_cpu_count);
3079 			__call_rcu(&rdp->barrier_head, rcu_barrier_callback,
3080 				   rsp, cpu, 0);
3081 		} else if (ACCESS_ONCE(rdp->qlen)) {
3082 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
3083 					   rsp->n_barrier_done);
3084 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3085 		} else {
3086 			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3087 					   rsp->n_barrier_done);
3088 		}
3089 	}
3090 	put_online_cpus();
3091 
3092 	/*
3093 	 * Now that an rcu_barrier_callback() is queued on each relevant
3094 	 * CPU, and each such CPU is therefore counted, remove the initial count.
3095 	 */
3096 	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3097 		complete(&rsp->barrier_completion);
3098 
3099 	/* Increment ->n_barrier_done to prevent duplicate work. */
3100 	smp_mb(); /* Keep increment after above mechanism. */
3101 	ACCESS_ONCE(rsp->n_barrier_done)++;
3102 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
3103 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
3104 	smp_mb(); /* Keep increment before caller's subsequent code. */
3105 
3106 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3107 	wait_for_completion(&rsp->barrier_completion);
3108 
3109 	/* Other rcu_barrier() invocations can now safely proceed. */
3110 	mutex_unlock(&rsp->barrier_mutex);
3111 }
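
/*
 * Worked example of the even/odd ->n_barrier_done arithmetic used in
 * _rcu_barrier() above, as a stand-alone user-space sketch.  This is
 * illustrative only and not part of this file; barrier_done_covers()
 * is a name invented here.  Plain >= stands in for ULONG_CMP_GE(),
 * which differs only when the counter wraps.
 */
#include <assert.h>

/* Does a counter value of "done" cover a request snapshotted at "snap"? */
static int barrier_done_covers(unsigned long done, unsigned long snap)
{
	return done >= ((snap + 3) & ~0x1UL);
}

int main(void)
{
	/* Even snap (4): no barrier in flight; the next full barrier ends at 6. */
	assert(!barrier_done_covers(5, 4));	/* that barrier still running */
	assert(barrier_done_covers(6, 4));	/* (4 + 3) & ~0x1 == 6 */

	/* Odd snap (5): a barrier was already running; also need the next one, ending at 8. */
	assert(!barrier_done_covers(6, 5));	/* only the pre-existing barrier finished */
	assert(barrier_done_covers(8, 5));	/* (5 + 3) & ~0x1 == 8 */
	return 0;
}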
3112 
3113 /**
3114  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
3115  */
3116 void rcu_barrier_bh(void)
3117 {
3118 	_rcu_barrier(&rcu_bh_state);
3119 }
3120 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3121 
3122 /**
3123  * rcu_barrier_sched - Wait until all in-flight call_rcu_sched() callbacks complete.
3124  */
3125 void rcu_barrier_sched(void)
3126 {
3127 	_rcu_barrier(&rcu_sched_state);
3128 }
3129 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
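
/*
 * Typical caller-side pattern for the two exported barriers above: a
 * module that has posted call_rcu_sched() callbacks must wait for all
 * of them to be invoked before its code and data disappear.  Minimal
 * sketch only; my_table and the module-exit function are hypothetical.
 */
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static int *my_table;			/* hypothetical RCU-protected data */

static void __exit my_exit(void)
{
	/* Hypothetical step: stop anything that might still post callbacks. */
	rcu_barrier_sched();		/* wait for all posted callbacks to run */
	kfree(my_table);		/* now safe to free */
}
module_exit(my_exit);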
3130 
3131 /*
3132  * Do boot-time initialization of a CPU's per-CPU RCU data.
3133  */
3134 static void __init
3135 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3136 {
3137 	unsigned long flags;
3138 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3139 	struct rcu_node *rnp = rcu_get_root(rsp);
3140 
3141 	/* Set up local state, ensuring consistent view of global state. */
3142 	raw_spin_lock_irqsave(&rnp->lock, flags);
3143 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3144 	init_callback_list(rdp);
3145 	rdp->qlen_lazy = 0;
3146 	ACCESS_ONCE(rdp->qlen) = 0;
3147 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3148 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3149 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
3150 	rdp->cpu = cpu;
3151 	rdp->rsp = rsp;
3152 	rcu_boot_init_nocb_percpu_data(rdp);
3153 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
3154 }
3155 
3156 /*
3157  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
3158  * offline event can be happening at a given time.  Note also that we
3159  * can accept some slop in the rsp->completed access due to the fact
3160  * that this CPU cannot possibly have any RCU callbacks in flight yet.
3161  */
3162 static void
3163 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
3164 {
3165 	unsigned long flags;
3166 	unsigned long mask;
3167 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3168 	struct rcu_node *rnp = rcu_get_root(rsp);
3169 
3170 	/* Exclude new grace periods. */
3171 	mutex_lock(&rsp->onoff_mutex);
3172 
3173 	/* Set up local state, ensuring consistent view of global state. */
3174 	raw_spin_lock_irqsave(&rnp->lock, flags);
3175 	rdp->beenonline = 1;	 /* We have now been online. */
3176 	rdp->preemptible = preemptible;
3177 	rdp->qlen_last_fqs_check = 0;
3178 	rdp->n_force_qs_snap = rsp->n_force_qs;
3179 	rdp->blimit = blimit;
3180 	init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
3181 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3182 	rcu_sysidle_init_percpu_data(rdp->dynticks);
3183 	atomic_set(&rdp->dynticks->dynticks,
3184 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
3185 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
3186 
3187 	/* Add CPU to rcu_node bitmasks. */
3188 	rnp = rdp->mynode;
3189 	mask = rdp->grpmask;
3190 	do {
3191 		/* Exclude any attempts to start a new GP on small systems. */
3192 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
3193 		rnp->qsmaskinit |= mask;
3194 		mask = rnp->grpmask;
3195 		if (rnp == rdp->mynode) {
3196 			/*
3197 			 * If there is a grace period in progress, we will
3198 			 * set up to wait for it next time we run the
3199 			 * RCU core code.
3200 			 */
3201 			rdp->gpnum = rnp->completed;
3202 			rdp->completed = rnp->completed;
3203 			rdp->passed_quiesce = 0;
3204 			rdp->qs_pending = 0;
3205 			trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3206 		}
3207 		raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
3208 		rnp = rnp->parent;
3209 	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
3210 	local_irq_restore(flags);
3211 
3212 	mutex_unlock(&rsp->onoff_mutex);
3213 }
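
/*
 * Stand-alone sketch of the upward ->qsmaskinit propagation done by
 * the do/while loop above (illustrative only; struct node and the
 * one-root/one-leaf tree are inventions of this example).  Note how
 * the second call stops at the root because the leaf's bit is already
 * set there, mirroring the "!(rnp->qsmaskinit & mask)" termination test.
 */
#include <stdio.h>

struct node {
	unsigned long qsmaskinit;	/* which children have online CPUs */
	unsigned long grpmask;		/* this node's bit in its parent */
	struct node *parent;
};

static void propagate_online(struct node *leaf, unsigned long cpumask)
{
	struct node *np = leaf;
	unsigned long mask = cpumask;

	do {
		np->qsmaskinit |= mask;
		mask = np->grpmask;
		np = np->parent;
	} while (np != NULL && !(np->qsmaskinit & mask));
}

int main(void)
{
	struct node root = { 0, 0, NULL };
	struct node leaf = { 0, 0x1, &root };	/* leaf is child 0 of root */

	propagate_online(&leaf, 0x4);		/* CPU 2 of this leaf comes online */
	printf("leaf=%#lx root=%#lx\n", leaf.qsmaskinit, root.qsmaskinit);
	/* prints leaf=0x4 root=0x1 */
	propagate_online(&leaf, 0x2);		/* CPU 1: stops early at the root */
	printf("leaf=%#lx root=%#lx\n", leaf.qsmaskinit, root.qsmaskinit);
	/* prints leaf=0x6 root=0x1 */
	return 0;
}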
3214 
3215 static void rcu_prepare_cpu(int cpu)
3216 {
3217 	struct rcu_state *rsp;
3218 
3219 	for_each_rcu_flavor(rsp)
3220 		rcu_init_percpu_data(cpu, rsp,
3221 				     strcmp(rsp->name, "rcu_preempt") == 0);
3222 }
3223 
3224 /*
3225  * Handle CPU online/offline notification events.
3226  */
3227 static int rcu_cpu_notify(struct notifier_block *self,
3228 				    unsigned long action, void *hcpu)
3229 {
3230 	long cpu = (long)hcpu;
3231 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
3232 	struct rcu_node *rnp = rdp->mynode;
3233 	struct rcu_state *rsp;
3234 
3235 	trace_rcu_utilization(TPS("Start CPU hotplug"));
3236 	switch (action) {
3237 	case CPU_UP_PREPARE:
3238 	case CPU_UP_PREPARE_FROZEN:
3239 		rcu_prepare_cpu(cpu);
3240 		rcu_prepare_kthreads(cpu);
3241 		break;
3242 	case CPU_ONLINE:
3243 	case CPU_DOWN_FAILED:
3244 		rcu_boost_kthread_setaffinity(rnp, -1);
3245 		break;
3246 	case CPU_DOWN_PREPARE:
3247 		rcu_boost_kthread_setaffinity(rnp, cpu);
3248 		break;
3249 	case CPU_DYING:
3250 	case CPU_DYING_FROZEN:
3251 		for_each_rcu_flavor(rsp)
3252 			rcu_cleanup_dying_cpu(rsp);
3253 		break;
3254 	case CPU_DEAD:
3255 	case CPU_DEAD_FROZEN:
3256 	case CPU_UP_CANCELED:
3257 	case CPU_UP_CANCELED_FROZEN:
3258 		for_each_rcu_flavor(rsp)
3259 			rcu_cleanup_dead_cpu(cpu, rsp);
3260 		break;
3261 	default:
3262 		break;
3263 	}
3264 	trace_rcu_utilization(TPS("End CPU hotplug"));
3265 	return NOTIFY_OK;
3266 }
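
/*
 * For reference, the shape of a minimal CPU-hotplug notifier using the
 * same (pre-cpuhp-state-machine) API as rcu_cpu_notify() above.  Sketch
 * only; my_cpu_notify() is hypothetical.  The "& ~CPU_TASKS_FROZEN"
 * folds together the _FROZEN variants that rcu_cpu_notify() instead
 * lists explicitly.
 */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int my_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;	/* the CPU number is encoded in hcpu */

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_info("preparing CPU %ld\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("CPU %ld is gone\n", cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
/* Registered early in boot, e.g.:  cpu_notifier(my_cpu_notify, 0); */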
3267 
3268 static int rcu_pm_notify(struct notifier_block *self,
3269 			 unsigned long action, void *hcpu)
3270 {
3271 	switch (action) {
3272 	case PM_HIBERNATION_PREPARE:
3273 	case PM_SUSPEND_PREPARE:
3274 		if (nr_cpu_ids <= 256) /* Expediting is bad for large systems. */
3275 			rcu_expedited = 1;
3276 		break;
3277 	case PM_POST_HIBERNATION:
3278 	case PM_POST_SUSPEND:
3279 		rcu_expedited = 0;
3280 		break;
3281 	default:
3282 		break;
3283 	}
3284 	return NOTIFY_OK;
3285 }
3286 
3287 /*
3288  * Spawn the kthread that handles this RCU flavor's grace periods.
3289  */
3290 static int __init rcu_spawn_gp_kthread(void)
3291 {
3292 	unsigned long flags;
3293 	struct rcu_node *rnp;
3294 	struct rcu_state *rsp;
3295 	struct task_struct *t;
3296 
3297 	for_each_rcu_flavor(rsp) {
3298 		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
3299 		BUG_ON(IS_ERR(t));
3300 		rnp = rcu_get_root(rsp);
3301 		raw_spin_lock_irqsave(&rnp->lock, flags);
3302 		rsp->gp_kthread = t;
3303 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
3304 		rcu_spawn_nocb_kthreads(rsp);
3305 	}
3306 	return 0;
3307 }
3308 early_initcall(rcu_spawn_gp_kthread);
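
/*
 * The grace-period kthread started above follows the stock kthread
 * pattern.  Generic sketch of that pattern only (my_thread_fn() is
 * hypothetical; the actual RCU GP kthread loop lives elsewhere in
 * this file):
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_thread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* do one unit of work on "arg" here */
		schedule_timeout_interruptible(HZ);	/* sleep ~1 second */
	}
	return 0;
}
/* Started with:  kthread_run(my_thread_fn, arg, "my_thread"); */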
3309 
3310 /*
3311  * This function is invoked towards the end of the scheduler's initialization
3312  * process.  Before this is called, the idle task might contain
3313  * RCU read-side critical sections (during which time this idle
3314  * task is booting the system).  After this function is called, the
3315  * idle tasks are prohibited from containing RCU read-side critical
3316  * sections.  This function also enables RCU lockdep checking.
3317  */
3318 void rcu_scheduler_starting(void)
3319 {
3320 	WARN_ON(num_online_cpus() != 1);
3321 	WARN_ON(nr_context_switches() > 0);
3322 	rcu_scheduler_active = 1;
3323 }
3324 
3325 /*
3326  * Compute the per-level fanout, either using the exact fanout specified
3327  * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
3328  */
3329 #ifdef CONFIG_RCU_FANOUT_EXACT
3330 static void __init rcu_init_levelspread(struct rcu_state *rsp)
3331 {
3332 	int i;
3333 
3334 	rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
3335 	for (i = rcu_num_lvls - 2; i >= 0; i--)
3336 		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
3337 }
3338 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
3339 static void __init rcu_init_levelspread(struct rcu_state *rsp)
3340 {
3341 	int ccur;
3342 	int cprv;
3343 	int i;
3344 
3345 	cprv = nr_cpu_ids;
3346 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3347 		ccur = rsp->levelcnt[i];
3348 		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3349 		cprv = ccur;
3350 	}
3351 }
3352 #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
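
/*
 * Worked example of the balanced (non-EXACT) computation above, as a
 * stand-alone program (illustrative only; the counts are invented).
 * The expression (cprv + ccur - 1) / ccur is ceiling division, so for
 * 64 CPUs over 4 leaves under 1 root, the spreads come out as
 * ceil(64/4) = 16 at the leaves and ceil(4/1) = 4 at the root.
 */
#include <stdio.h>

int main(void)
{
	int levelcnt[2] = { 1, 4 };	/* root level, then leaf level */
	int levelspread[2];
	int cprv = 64;			/* plays the role of nr_cpu_ids */
	int ccur, i;

	for (i = 1; i >= 0; i--) {
		ccur = levelcnt[i];
		levelspread[i] = (cprv + ccur - 1) / ccur;	/* ceil(cprv/ccur) */
		cprv = ccur;
	}
	printf("leaf spread %d, root spread %d\n", levelspread[1], levelspread[0]);
	/* prints "leaf spread 16, root spread 4" */
	return 0;
}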
3353 
3354 /*
3355  * Helper function for rcu_init() that initializes one rcu_state structure.
3356  */
3357 static void __init rcu_init_one(struct rcu_state *rsp,
3358 		struct rcu_data __percpu *rda)
3359 {
3360 	static char *buf[] = { "rcu_node_0",
3361 			       "rcu_node_1",
3362 			       "rcu_node_2",
3363 			       "rcu_node_3" };  /* Match MAX_RCU_LVLS */
3364 	static char *fqs[] = { "rcu_node_fqs_0",
3365 			       "rcu_node_fqs_1",
3366 			       "rcu_node_fqs_2",
3367 			       "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
3368 	int cpustride = 1;
3369 	int i;
3370 	int j;
3371 	struct rcu_node *rnp;
3372 
3373 	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3374 
3375 	/* Silence gcc 4.8 warning about array index out of range. */
3376 	if (rcu_num_lvls > RCU_NUM_LVLS)
3377 		panic("rcu_init_one: rcu_num_lvls overflow");
3378 
3379 	/* Initialize the level-tracking arrays. */
3380 
3381 	for (i = 0; i < rcu_num_lvls; i++)
3382 		rsp->levelcnt[i] = num_rcu_lvl[i];
3383 	for (i = 1; i < rcu_num_lvls; i++)
3384 		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3385 	rcu_init_levelspread(rsp);
3386 
3387 	/* Initialize the elements themselves, starting from the leaves. */
3388 
3389 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3390 		cpustride *= rsp->levelspread[i];
3391 		rnp = rsp->level[i];
3392 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
3393 			raw_spin_lock_init(&rnp->lock);
3394 			lockdep_set_class_and_name(&rnp->lock,
3395 						   &rcu_node_class[i], buf[i]);
3396 			raw_spin_lock_init(&rnp->fqslock);
3397 			lockdep_set_class_and_name(&rnp->fqslock,
3398 						   &rcu_fqs_class[i], fqs[i]);
3399 			rnp->gpnum = rsp->gpnum;
3400 			rnp->completed = rsp->completed;
3401 			rnp->qsmask = 0;
3402 			rnp->qsmaskinit = 0;
3403 			rnp->grplo = j * cpustride;
3404 			rnp->grphi = (j + 1) * cpustride - 1;
3405 			if (rnp->grphi >= NR_CPUS)
3406 				rnp->grphi = NR_CPUS - 1;
3407 			if (i == 0) {
3408 				rnp->grpnum = 0;
3409 				rnp->grpmask = 0;
3410 				rnp->parent = NULL;
3411 			} else {
3412 				rnp->grpnum = j % rsp->levelspread[i - 1];
3413 				rnp->grpmask = 1UL << rnp->grpnum;
3414 				rnp->parent = rsp->level[i - 1] +
3415 					      j / rsp->levelspread[i - 1];
3416 			}
3417 			rnp->level = i;
3418 			INIT_LIST_HEAD(&rnp->blkd_tasks);
3419 			rcu_init_one_nocb(rnp);
3420 		}
3421 	}
3422 
3423 	rsp->rda = rda;
3424 	init_waitqueue_head(&rsp->gp_wq);
3425 	init_irq_work(&rsp->wakeup_work, rsp_wakeup);
3426 	rnp = rsp->level[rcu_num_lvls - 1];
3427 	for_each_possible_cpu(i) {
3428 		while (i > rnp->grphi)
3429 			rnp++;
3430 		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3431 		rcu_boot_init_percpu_data(i, rsp);
3432 	}
3433 	list_add(&rsp->flavors, &rcu_struct_flavors);
3434 }
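
/*
 * Stand-alone sketch of the grplo/grphi assignment in the loop above
 * (illustrative only; the geometry is invented).  With two levels,
 * 16 CPUs, and a levelspread of 4 at both levels, each of the four
 * leaves covers a contiguous block of four CPUs and the root covers
 * them all.
 */
#include <stdio.h>

int main(void)
{
	int levelspread[2] = { 4, 4 };	/* root, leaf */
	int levelcnt[2] = { 1, 4 };	/* one root, four leaves */
	int nr_cpus = 16;
	int cpustride = 1;
	int i, j;

	for (i = 1; i >= 0; i--) {	/* leaves first, as above */
		cpustride *= levelspread[i];
		for (j = 0; j < levelcnt[i]; j++) {
			int grplo = j * cpustride;
			int grphi = (j + 1) * cpustride - 1;

			if (grphi >= nr_cpus)
				grphi = nr_cpus - 1;
			printf("level %d node %d: CPUs %d-%d\n",
			       i, j, grplo, grphi);
		}
	}
	/* prints CPUs 0-3, 4-7, 8-11, 12-15 for the leaves, 0-15 for the root */
	return 0;
}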
3435 
3436 /*
3437  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3438  * replace the definitions in tree.h because those are needed to size
3439  * the ->node array in the rcu_state structure.
3440  */
3441 static void __init rcu_init_geometry(void)
3442 {
3443 	ulong d;
3444 	int i;
3445 	int j;
3446 	int n = nr_cpu_ids;
3447 	int rcu_capacity[MAX_RCU_LVLS + 1];
3448 
3449 	/*
3450 	 * Initialize any unspecified boot parameters.
3451 	 * The defaults for jiffies_till_first_fqs and jiffies_till_next_fqs
3452 	 * are the RCU_JIFFIES_TILL_FORCE_QS value (which is a function of
3453 	 * HZ), plus one jiffy for each RCU_JIFFIES_FQS_DIV CPUs that might
3454 	 * be on the system.
3455 	 */
3456 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3457 	if (jiffies_till_first_fqs == ULONG_MAX)
3458 		jiffies_till_first_fqs = d;
3459 	if (jiffies_till_next_fqs == ULONG_MAX)
3460 		jiffies_till_next_fqs = d;
3461 
3462 	/* If the compile-time values are accurate, just leave. */
3463 	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
3464 	    nr_cpu_ids == NR_CPUS)
3465 		return;
3466 	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
3467 		rcu_fanout_leaf, nr_cpu_ids);
3468 
3469 	/*
3470 	 * Compute the number of nodes that can be handled by an rcu_node tree
3471 	 * with the given number of levels.  Setting rcu_capacity[0] makes
3472 	 * some of the arithmetic easier.
3473 	 */
3474 	rcu_capacity[0] = 1;
3475 	rcu_capacity[1] = rcu_fanout_leaf;
3476 	for (i = 2; i <= MAX_RCU_LVLS; i++)
3477 		rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
3478 
3479 	/*
3480 	 * The boot-time rcu_fanout_leaf parameter is only permitted
3481 	 * to increase the leaf-level fanout, not decrease it.  Of course,
3482 	 * the leaf-level fanout cannot exceed the number of bits in
3483 	 * the rcu_node masks.  Finally, the tree must be able to accommodate
3484 	 * the configured number of CPUs.  Complain and fall back to the
3485 	 * compile-time values if these limits are exceeded.
3486 	 */
3487 	if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
3488 	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
3489 	    n > rcu_capacity[MAX_RCU_LVLS]) {
3490 		WARN_ON(1);
3491 		return;
3492 	}
3493 
3494 	/* Calculate the number of rcu_nodes at each level of the tree. */
3495 	for (i = 1; i <= MAX_RCU_LVLS; i++)
3496 		if (n <= rcu_capacity[i]) {
3497 			for (j = 0; j <= i; j++)
3498 				num_rcu_lvl[j] =
3499 					DIV_ROUND_UP(n, rcu_capacity[i - j]);
3500 			rcu_num_lvls = i;
3501 			for (j = i + 1; j <= MAX_RCU_LVLS; j++)
3502 				num_rcu_lvl[j] = 0;
3503 			break;
3504 		}
3505 
3506 	/* Calculate the total number of rcu_node structures. */
3507 	rcu_num_nodes = 0;
3508 	for (i = 0; i <= MAX_RCU_LVLS; i++)
3509 		rcu_num_nodes += num_rcu_lvl[i];
3510 	rcu_num_nodes -= n;
3511 }
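
/*
 * Stand-alone example of the capacity/levels computation above
 * (illustrative only; the fanouts and CPU count are invented).  With
 * a leaf fanout of 16 and an interior fanout of 64, 1000 CPUs fit in
 * two levels: a single root over ceil(1000/16) = 63 leaves.  As in
 * the kernel code, the final array entry counts CPUs rather than
 * rcu_node structures, which is why rcu_init_geometry() subtracts n
 * at the end.
 */
#include <stdio.h>

#define MAX_LVLS 4

int main(void)
{
	int fanout_leaf = 16, fanout = 64, n = 1000;
	int rcu_capacity[MAX_LVLS + 1];
	int num_lvl[MAX_LVLS + 1] = { 0 };
	int i, j, levels = 0;

	rcu_capacity[0] = 1;
	rcu_capacity[1] = fanout_leaf;
	for (i = 2; i <= MAX_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * fanout;

	for (i = 1; i <= MAX_LVLS; i++)
		if (n <= rcu_capacity[i]) {
			for (j = 0; j <= i; j++)	/* DIV_ROUND_UP */
				num_lvl[j] = (n + rcu_capacity[i - j] - 1) /
					     rcu_capacity[i - j];
			levels = i;
			break;
		}
	printf("%d levels:", levels);
	for (j = 0; j < levels; j++)
		printf(" %d", num_lvl[j]);
	printf("\n");	/* prints "2 levels: 1 63" for these inputs */
	return 0;
}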
3512 
3513 void __init rcu_init(void)
3514 {
3515 	int cpu;
3516 
3517 	rcu_bootup_announce();
3518 	rcu_init_geometry();
3519 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
3520 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
3521 	__rcu_init_preempt();
3522 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
3523 
3524 	/*
3525 	 * We don't need protection against CPU-hotplug here because
3526 	 * this is called early in boot, before either interrupts
3527 	 * or the scheduler are operational.
3528 	 */
3529 	cpu_notifier(rcu_cpu_notify, 0);
3530 	pm_notifier(rcu_pm_notify, 0);
3531 	for_each_online_cpu(cpu)
3532 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
3533 }
3534 
3535 #include "tree_plugin.h"
3536