xref: /openbmc/linux/kernel/rcu/tree.c (revision af958a38)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, you can access it online at
16  * http://www.gnu.org/licenses/gpl-2.0.html.
17  *
18  * Copyright IBM Corporation, 2008
19  *
20  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21  *	    Manfred Spraul <manfred@colorfullife.com>
22  *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
23  *
24  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26  *
27  * For a detailed explanation of the Read-Copy Update mechanism, see -
28  *	Documentation/RCU
29  */
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/smp.h>
35 #include <linux/rcupdate.h>
36 #include <linux/interrupt.h>
37 #include <linux/sched.h>
38 #include <linux/nmi.h>
39 #include <linux/atomic.h>
40 #include <linux/bitops.h>
41 #include <linux/export.h>
42 #include <linux/completion.h>
43 #include <linux/moduleparam.h>
44 #include <linux/module.h>
45 #include <linux/percpu.h>
46 #include <linux/notifier.h>
47 #include <linux/cpu.h>
48 #include <linux/mutex.h>
49 #include <linux/time.h>
50 #include <linux/kernel_stat.h>
51 #include <linux/wait.h>
52 #include <linux/kthread.h>
53 #include <linux/prefetch.h>
54 #include <linux/delay.h>
55 #include <linux/stop_machine.h>
56 #include <linux/random.h>
57 #include <linux/ftrace_event.h>
58 #include <linux/suspend.h>
59 
60 #include "tree.h"
61 #include "rcu.h"
62 
63 MODULE_ALIAS("rcutree");
64 #ifdef MODULE_PARAM_PREFIX
65 #undef MODULE_PARAM_PREFIX
66 #endif
67 #define MODULE_PARAM_PREFIX "rcutree."
68 
69 /* Data structures. */
70 
71 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
72 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
73 
74 /*
75  * In order to export the rcu_state name to the tracing tools, it
76  * needs to be added to the __tracepoint_string section.
77  * This requires defining a separate variable tp_<sname>_varname
78  * that points to the string being used, which allows
79  * the userspace tracing tools to map the string
80  * address back to the matching string.
81  */
82 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
83 static char sname##_varname[] = #sname; \
84 static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
85 struct rcu_state sname##_state = { \
86 	.level = { &sname##_state.node[0] }, \
87 	.call = cr, \
88 	.fqs_state = RCU_GP_IDLE, \
89 	.gpnum = 0UL - 300UL, \
90 	.completed = 0UL - 300UL, \
91 	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
92 	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
93 	.orphan_donetail = &sname##_state.orphan_donelist, \
94 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
95 	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
96 	.name = sname##_varname, \
97 	.abbr = sabbr, \
98 }; \
99 DEFINE_PER_CPU(struct rcu_data, sname##_data)
100 
101 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
102 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
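/*
 * For reference, a sketch of what the initializer above expands to for
 * the rcu_sched flavor (abbreviated, not the exact preprocessor output):
 *
 *	static char rcu_sched_varname[] = "rcu_sched";
 *	static const char *tp_rcu_sched_varname __used __tracepoint_string =
 *		rcu_sched_varname;
 *	struct rcu_state rcu_sched_state = {
 *		.level = { &rcu_sched_state.node[0] },
 *		.call = call_rcu_sched,
 *		.name = rcu_sched_varname,
 *		.abbr = 's',
 *		... remaining fields as in the macro above ...
 *	};
 *	DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 */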
103 
104 static struct rcu_state *rcu_state_p;
105 LIST_HEAD(rcu_struct_flavors);
106 
107 /* Increase (but not decrease) CONFIG_RCU_FANOUT_LEAF at boot time. */
108 static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
109 module_param(rcu_fanout_leaf, int, 0444);
110 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
111 static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
112 	NUM_RCU_LVL_0,
113 	NUM_RCU_LVL_1,
114 	NUM_RCU_LVL_2,
115 	NUM_RCU_LVL_3,
116 	NUM_RCU_LVL_4,
117 };
118 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
119 
120 /*
121  * The rcu_scheduler_active variable transitions from zero to one just
122  * before the first task is spawned.  So when this variable is zero, RCU
123  * can assume that there is but one task, allowing RCU to (for example)
124  * optimize synchronize_sched() to a simple barrier().  When this variable
125  * is one, RCU must actually do all the hard work required to detect real
126  * grace periods.  This variable is also used to suppress boot-time false
127  * positives from lockdep-RCU error checking.
128  */
129 int rcu_scheduler_active __read_mostly;
130 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
131 
132 /*
133  * The rcu_scheduler_fully_active variable transitions from zero to one
134  * during the early_initcall() processing, which is after the scheduler
135  * is capable of creating new tasks.  So RCU processing (for example,
136  * creating tasks for RCU priority boosting) must be delayed until after
137  * rcu_scheduler_fully_active transitions from zero to one.  We also
138  * currently delay invocation of any RCU callbacks until after this point.
139  *
140  * It might later prove better for people registering RCU callbacks during
141  * early boot to take responsibility for these callbacks, but one step at
142  * a time.
143  */
144 static int rcu_scheduler_fully_active __read_mostly;
145 
146 #ifdef CONFIG_RCU_BOOST
147 
148 /*
149  * Control variables for per-CPU and per-rcu_node kthreads.  These
150  * handle all flavors of RCU.
151  */
152 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
153 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
154 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
155 DEFINE_PER_CPU(char, rcu_cpu_has_work);
156 
157 #endif /* #ifdef CONFIG_RCU_BOOST */
158 
159 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
160 static void invoke_rcu_core(void);
161 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
162 
163 /*
164  * Track the rcutorture test sequence number and the update version
165  * number within a given test.  The rcutorture_testseq is incremented
166  * on every rcutorture module load and unload, so it has an odd value
167  * when a test is running.  The rcutorture_vernum is set to zero
168  * when rcutorture starts and is incremented on each rcutorture update.
169  * These variables enable correlating rcutorture output with the
170  * RCU tracing information.
171  */
172 unsigned long rcutorture_testseq;
173 unsigned long rcutorture_vernum;
174 
175 /*
176  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
177  * permit this function to be invoked without holding the root rcu_node
178  * structure's ->lock, but of course results can be subject to change.
179  */
180 static int rcu_gp_in_progress(struct rcu_state *rsp)
181 {
182 	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
183 }
184 
185 /*
186  * Note a quiescent state.  Because we do not need to know
187  * how many quiescent states passed, only whether there was at least
188  * one since the start of the grace period, this just sets a flag.
189  * The caller must have disabled preemption.
190  */
191 void rcu_sched_qs(int cpu)
192 {
193 	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
194 
195 	if (rdp->passed_quiesce == 0)
196 		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
197 	rdp->passed_quiesce = 1;
198 }
199 
200 void rcu_bh_qs(int cpu)
201 {
202 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
203 
204 	if (rdp->passed_quiesce == 0)
205 		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
206 	rdp->passed_quiesce = 1;
207 }
208 
209 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
210 
211 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
212 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
213 	.dynticks = ATOMIC_INIT(1),
214 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
215 	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
216 	.dynticks_idle = ATOMIC_INIT(1),
217 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
218 };
219 
220 /*
221  * Let the RCU core know that this CPU has gone through the scheduler,
222  * which is a quiescent state.  This is called when the need for a
223  * quiescent state is urgent, so we burn an atomic operation and full
224  * memory barriers to let the RCU core know about it, regardless of what
225  * this CPU might (or might not) do in the near future.
226  *
227  * We inform the RCU core by emulating a zero-duration dyntick-idle
228  * period, which we in turn do by incrementing the ->dynticks counter
229  * by two.
230  */
231 static void rcu_momentary_dyntick_idle(void)
232 {
233 	unsigned long flags;
234 	struct rcu_data *rdp;
235 	struct rcu_dynticks *rdtp;
236 	int resched_mask;
237 	struct rcu_state *rsp;
238 
239 	local_irq_save(flags);
240 
241 	/*
242 	 * Yes, we can lose flag-setting operations.  This is OK, because
243 	 * the flag will be set again after some delay.
244 	 */
245 	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
246 	raw_cpu_write(rcu_sched_qs_mask, 0);
247 
248 	/* Find the flavor that needs a quiescent state. */
249 	for_each_rcu_flavor(rsp) {
250 		rdp = raw_cpu_ptr(rsp->rda);
251 		if (!(resched_mask & rsp->flavor_mask))
252 			continue;
253 		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
254 		if (ACCESS_ONCE(rdp->mynode->completed) !=
255 		    ACCESS_ONCE(rdp->cond_resched_completed))
256 			continue;
257 
258 		/*
259 		 * Pretend to be momentarily idle for the quiescent state.
260 		 * This allows the grace-period kthread to record the
261 		 * quiescent state, with no need for this CPU to do anything
262 		 * further.
263 		 */
264 		rdtp = this_cpu_ptr(&rcu_dynticks);
265 		smp_mb__before_atomic(); /* Earlier stuff before QS. */
266 		atomic_add(2, &rdtp->dynticks);  /* QS. */
267 		smp_mb__after_atomic(); /* Later stuff after QS. */
268 		break;
269 	}
270 	local_irq_restore(flags);
271 }
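/*
 * A worked example of the ->dynticks convention relied on above: the
 * counter is even when the CPU is in an extended quiescent state and
 * odd otherwise.  If the force-quiescent-state scan snapshots the
 * counter at 8 (even), the CPU is idle and is credited with a
 * quiescent state immediately.  If the snapshot is 9 (odd), the
 * atomic_add(2, ...) above moves the counter to 11, and the recheck in
 * rcu_implicit_dynticks_qs() sees UINT_CMP_GE(11, 9 + 2) and credits
 * the CPU just as if it had passed through a real dyntick-idle period.
 */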
272 
273 /*
274  * Note a context switch.  This is a quiescent state for RCU-sched,
275  * and requires special handling for preemptible RCU.
276  * The caller must have disabled preemption.
277  */
278 void rcu_note_context_switch(int cpu)
279 {
280 	trace_rcu_utilization(TPS("Start context switch"));
281 	rcu_sched_qs(cpu);
282 	rcu_preempt_note_context_switch(cpu);
283 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
284 		rcu_momentary_dyntick_idle();
285 	trace_rcu_utilization(TPS("End context switch"));
286 }
287 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
288 
289 static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
290 static long qhimark = 10000;	/* If this many pending, ignore blimit. */
291 static long qlowmark = 100;	/* Once only this many pending, use blimit. */
292 
293 module_param(blimit, long, 0444);
294 module_param(qhimark, long, 0444);
295 module_param(qlowmark, long, 0444);
296 
297 static ulong jiffies_till_first_fqs = ULONG_MAX;
298 static ulong jiffies_till_next_fqs = ULONG_MAX;
299 
300 module_param(jiffies_till_first_fqs, ulong, 0644);
301 module_param(jiffies_till_next_fqs, ulong, 0644);
302 
303 /*
304  * How long the grace period must be before we start recruiting
305  * quiescent-state help from rcu_note_context_switch().
306  */
307 static ulong jiffies_till_sched_qs = HZ / 20;
308 module_param(jiffies_till_sched_qs, ulong, 0644);
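/*
 * Given the "rcutree." MODULE_PARAM_PREFIX defined above, these knobs
 * appear on the kernel command line as, for example:
 *
 *	rcutree.blimit=20 rcutree.jiffies_till_first_fqs=3
 *	rcutree.jiffies_till_sched_qs=10
 *
 * The 0644 parameters can also be inspected and adjusted at runtime
 * via /sys/module/rcutree/parameters/.
 */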
309 
310 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
311 				  struct rcu_data *rdp);
312 static void force_qs_rnp(struct rcu_state *rsp,
313 			 int (*f)(struct rcu_data *rsp, bool *isidle,
314 				  unsigned long *maxj),
315 			 bool *isidle, unsigned long *maxj);
316 static void force_quiescent_state(struct rcu_state *rsp);
317 static int rcu_pending(int cpu);
318 
319 /*
320  * Return the number of RCU-sched batches processed thus far for debug & stats.
321  */
322 long rcu_batches_completed_sched(void)
323 {
324 	return rcu_sched_state.completed;
325 }
326 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
327 
328 /*
329  * Return the number of RCU BH batches processed thus far for debug & stats.
330  */
331 long rcu_batches_completed_bh(void)
332 {
333 	return rcu_bh_state.completed;
334 }
335 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
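/*
 * A minimal usage sketch (not from this file) of how a caller can use
 * these counters to detect that a full grace period has elapsed:
 * snapshot the count, then wait for it to advance by at least two,
 * because the first increment may belong to a grace period that was
 * already under way at snapshot time:
 *
 *	unsigned long snap = rcu_batches_completed_sched();
 *	... time passes ...
 *	if (ULONG_CMP_GE(rcu_batches_completed_sched(), snap + 2))
 *		... at least one full RCU-sched grace period has elapsed ...
 */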
336 
337 /*
338  * Force a quiescent state.
339  */
340 void rcu_force_quiescent_state(void)
341 {
342 	force_quiescent_state(rcu_state_p);
343 }
344 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
345 
346 /*
347  * Force a quiescent state for RCU BH.
348  */
349 void rcu_bh_force_quiescent_state(void)
350 {
351 	force_quiescent_state(&rcu_bh_state);
352 }
353 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
354 
355 /*
356  * Show the state of the grace-period kthreads.
357  */
358 void show_rcu_gp_kthreads(void)
359 {
360 	struct rcu_state *rsp;
361 
362 	for_each_rcu_flavor(rsp) {
363 		pr_info("%s: wait state: %d ->state: %#lx\n",
364 			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
365 		/* sched_show_task(rsp->gp_kthread); */
366 	}
367 }
368 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
369 
370 /*
371  * Record the number of times rcutorture tests have been initiated and
372  * terminated.  This information allows the debugfs tracing stats to be
373  * correlated to the rcutorture messages, even when the rcutorture module
374  * is being repeatedly loaded and unloaded.  In other words, we cannot
375  * store this state in rcutorture itself.
376  */
377 void rcutorture_record_test_transition(void)
378 {
379 	rcutorture_testseq++;
380 	rcutorture_vernum = 0;
381 }
382 EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
383 
384 /*
385  * Send along grace-period-related data for rcutorture diagnostics.
386  */
387 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
388 			    unsigned long *gpnum, unsigned long *completed)
389 {
390 	struct rcu_state *rsp = NULL;
391 
392 	switch (test_type) {
393 	case RCU_FLAVOR:
394 		rsp = rcu_state_p;
395 		break;
396 	case RCU_BH_FLAVOR:
397 		rsp = &rcu_bh_state;
398 		break;
399 	case RCU_SCHED_FLAVOR:
400 		rsp = &rcu_sched_state;
401 		break;
402 	default:
403 		break;
404 	}
405 	if (rsp != NULL) {
406 		*flags = ACCESS_ONCE(rsp->gp_flags);
407 		*gpnum = ACCESS_ONCE(rsp->gpnum);
408 		*completed = ACCESS_ONCE(rsp->completed);
409 		return;
410 	}
411 	*flags = 0;
412 	*gpnum = 0;
413 	*completed = 0;
414 }
415 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
416 
417 /*
418  * Record the number of writer passes through the current rcutorture test.
419  * This is also used to correlate debugfs tracing stats with the rcutorture
420  * messages.
421  */
422 void rcutorture_record_progress(unsigned long vernum)
423 {
424 	rcutorture_vernum++;
425 }
426 EXPORT_SYMBOL_GPL(rcutorture_record_progress);
427 
428 /*
429  * Force a quiescent state for RCU-sched.
430  */
431 void rcu_sched_force_quiescent_state(void)
432 {
433 	force_quiescent_state(&rcu_sched_state);
434 }
435 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
436 
437 /*
438  * Does the CPU have callbacks ready to be invoked?
439  */
440 static int
441 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
442 {
443 	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
444 	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
445 }
446 
447 /*
448  * Return the root node of the specified rcu_state structure.
449  */
450 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
451 {
452 	return &rsp->node[0];
453 }
454 
455 /*
456  * Is there any need for future grace periods?
457  * Interrupts must be disabled.  If the caller does not hold the root
458  * rcu_node structure's ->lock, the results are advisory only.
459  */
460 static int rcu_future_needs_gp(struct rcu_state *rsp)
461 {
462 	struct rcu_node *rnp = rcu_get_root(rsp);
463 	int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
464 	int *fp = &rnp->need_future_gp[idx];
465 
466 	return ACCESS_ONCE(*fp);
467 }
468 
469 /*
470  * Does the current CPU require a not-yet-started grace period?
471  * The caller must have disabled interrupts to prevent races with
472  * normal callback registry.
473  */
474 static int
475 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
476 {
477 	int i;
478 
479 	if (rcu_gp_in_progress(rsp))
480 		return 0;  /* No, a grace period is already in progress. */
481 	if (rcu_future_needs_gp(rsp))
482 		return 1;  /* Yes, a no-CBs CPU needs one. */
483 	if (!rdp->nxttail[RCU_NEXT_TAIL])
484 		return 0;  /* No, this is a no-CBs (or offline) CPU. */
485 	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
486 		return 1;  /* Yes, this CPU has newly registered callbacks. */
487 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
488 		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
489 		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
490 				 rdp->nxtcompleted[i]))
491 			return 1;  /* Yes, CBs for future grace period. */
492 	return 0; /* No grace period needed. */
493 }
494 
495 /*
496  * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
497  *
498  * If the new value of the ->dynticks_nesting counter is now zero,
499  * we really have entered idle, and must do the appropriate accounting.
500  * The caller must have disabled interrupts.
501  */
502 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
503 				bool user)
504 {
505 	struct rcu_state *rsp;
506 	struct rcu_data *rdp;
507 
508 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
509 	if (!user && !is_idle_task(current)) {
510 		struct task_struct *idle __maybe_unused =
511 			idle_task(smp_processor_id());
512 
513 		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
514 		ftrace_dump(DUMP_ORIG);
515 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
516 			  current->pid, current->comm,
517 			  idle->pid, idle->comm); /* must be idle task! */
518 	}
519 	for_each_rcu_flavor(rsp) {
520 		rdp = this_cpu_ptr(rsp->rda);
521 		do_nocb_deferred_wakeup(rdp);
522 	}
523 	rcu_prepare_for_idle(smp_processor_id());
524 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
525 	smp_mb__before_atomic();  /* See above. */
526 	atomic_inc(&rdtp->dynticks);
527 	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
528 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
529 
530 	/*
531 	 * It is illegal to enter an extended quiescent state while
532 	 * in an RCU read-side critical section.
533 	 */
534 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
535 			   "Illegal idle entry in RCU read-side critical section.");
536 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
537 			   "Illegal idle entry in RCU-bh read-side critical section.");
538 	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
539 			   "Illegal idle entry in RCU-sched read-side critical section.");
540 }
541 
542 /*
543  * Enter an RCU extended quiescent state, which can be either the
544  * idle loop or adaptive-tickless usermode execution.
545  */
546 static void rcu_eqs_enter(bool user)
547 {
548 	long long oldval;
549 	struct rcu_dynticks *rdtp;
550 
551 	rdtp = this_cpu_ptr(&rcu_dynticks);
552 	oldval = rdtp->dynticks_nesting;
553 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
554 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
555 		rdtp->dynticks_nesting = 0;
556 		rcu_eqs_enter_common(rdtp, oldval, user);
557 	} else {
558 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
559 	}
560 }
561 
562 /**
563  * rcu_idle_enter - inform RCU that current CPU is entering idle
564  *
565  * Enter idle mode, in other words, -leave- the mode in which RCU
566  * read-side critical sections can occur.  (Though RCU read-side
567  * critical sections can occur in irq handlers in idle, a possibility
568  * handled by irq_enter() and irq_exit().)
569  *
570  * We crowbar the ->dynticks_nesting field to zero to allow for
571  * the possibility of usermode upcalls having messed up our count
572  * of interrupt nesting level during the prior busy period.
573  */
574 void rcu_idle_enter(void)
575 {
576 	unsigned long flags;
577 
578 	local_irq_save(flags);
579 	rcu_eqs_enter(false);
580 	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
581 	local_irq_restore(flags);
582 }
583 EXPORT_SYMBOL_GPL(rcu_idle_enter);
584 
585 #ifdef CONFIG_RCU_USER_QS
586 /**
587  * rcu_user_enter - inform RCU that we are resuming userspace.
588  *
589  * Enter RCU idle mode right before resuming userspace.  No use of RCU
590  * is permitted between this call and rcu_user_exit(). This way the
591  * CPU doesn't need to maintain the tick for RCU maintenance purposes
592  * when the CPU runs in userspace.
593  */
594 void rcu_user_enter(void)
595 {
596 	rcu_eqs_enter(1);
597 }
598 #endif /* CONFIG_RCU_USER_QS */
599 
600 /**
601  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
602  *
603  * Exit from an interrupt handler, which might possibly result in entering
604  * idle mode, in other words, leaving the mode in which read-side critical
605  * sections can occur.
606  *
607  * This code assumes that the idle loop never does anything that might
608  * result in unbalanced calls to irq_enter() and irq_exit().  If your
609  * architecture violates this assumption, RCU will give you what you
610  * deserve, good and hard.  But very infrequently and irreproducibly.
611  *
612  * Use things like work queues to work around this limitation.
613  *
614  * You have been warned.
615  */
616 void rcu_irq_exit(void)
617 {
618 	unsigned long flags;
619 	long long oldval;
620 	struct rcu_dynticks *rdtp;
621 
622 	local_irq_save(flags);
623 	rdtp = this_cpu_ptr(&rcu_dynticks);
624 	oldval = rdtp->dynticks_nesting;
625 	rdtp->dynticks_nesting--;
626 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
627 	if (rdtp->dynticks_nesting)
628 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
629 	else
630 		rcu_eqs_enter_common(rdtp, oldval, true);
631 	rcu_sysidle_enter(rdtp, 1);
632 	local_irq_restore(flags);
633 }
634 
635 /*
636  * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
637  *
638  * If the old value of the ->dynticks_nesting counter was zero,
639  * we really have exited idle, and must do the appropriate accounting.
640  * The caller must have disabled interrupts.
641  */
642 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
643 			       int user)
644 {
645 	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
646 	atomic_inc(&rdtp->dynticks);
647 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
648 	smp_mb__after_atomic();  /* See above. */
649 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
650 	rcu_cleanup_after_idle(smp_processor_id());
651 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
652 	if (!user && !is_idle_task(current)) {
653 		struct task_struct *idle __maybe_unused =
654 			idle_task(smp_processor_id());
655 
656 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
657 				  oldval, rdtp->dynticks_nesting);
658 		ftrace_dump(DUMP_ORIG);
659 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
660 			  current->pid, current->comm,
661 			  idle->pid, idle->comm); /* must be idle task! */
662 	}
663 }
664 
665 /*
666  * Exit an RCU extended quiescent state, which can be either the
667  * idle loop or adaptive-tickless usermode execution.
668  */
669 static void rcu_eqs_exit(bool user)
670 {
671 	struct rcu_dynticks *rdtp;
672 	long long oldval;
673 
674 	rdtp = this_cpu_ptr(&rcu_dynticks);
675 	oldval = rdtp->dynticks_nesting;
676 	WARN_ON_ONCE(oldval < 0);
677 	if (oldval & DYNTICK_TASK_NEST_MASK) {
678 		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
679 	} else {
680 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
681 		rcu_eqs_exit_common(rdtp, oldval, user);
682 	}
683 }
684 
685 /**
686  * rcu_idle_exit - inform RCU that current CPU is leaving idle
687  *
688  * Exit idle mode, in other words, -enter- the mode in which RCU
689  * read-side critical sections can occur.
690  *
691  * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_EXIT_IDLE to
692  * allow for the possibility of usermode upcalls messing up our count
693  * of interrupt nesting level during the busy period that is just
694  * now starting.
695  */
696 void rcu_idle_exit(void)
697 {
698 	unsigned long flags;
699 
700 	local_irq_save(flags);
701 	rcu_eqs_exit(false);
702 	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
703 	local_irq_restore(flags);
704 }
705 EXPORT_SYMBOL_GPL(rcu_idle_exit);
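/*
 * A minimal sketch, assuming a simplified idle loop, of how
 * rcu_idle_enter() and rcu_idle_exit() pair up around the window in
 * which no RCU read-side critical sections are legal:
 *
 *	while (!need_resched()) {
 *		rcu_idle_enter();
 *		... low-power wait; no rcu_read_lock() permitted here ...
 *		rcu_idle_exit();
 *	}
 *
 * In practice, the generic idle loop does this pairing on behalf of
 * each architecture.
 */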
706 
707 #ifdef CONFIG_RCU_USER_QS
708 /**
709  * rcu_user_exit - inform RCU that we are exiting userspace.
710  *
711  * Exit RCU idle mode when entering the kernel, because the kernel
712  * can run an RCU read-side critical section at any time.
713  */
714 void rcu_user_exit(void)
715 {
716 	rcu_eqs_exit(1);
717 }
718 #endif /* CONFIG_RCU_USER_QS */
719 
720 /**
721  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
722  *
723  * Enter an interrupt handler, which might possibly result in exiting
724  * idle mode, in other words, entering the mode in which read-side critical
725  * sections can occur.
726  *
727  * Note that the Linux kernel is fully capable of entering an interrupt
728  * handler that it never exits, for example when doing upcalls to
729  * user mode!  This code assumes that the idle loop never does upcalls to
730  * user mode.  If your architecture does do upcalls from the idle loop (or
731  * does anything else that results in unbalanced calls to the irq_enter()
732  * and irq_exit() functions), RCU will give you what you deserve, good
733  * and hard.  But very infrequently and irreproducibly.
734  *
735  * Use things like work queues to work around this limitation.
736  *
737  * You have been warned.
738  */
739 void rcu_irq_enter(void)
740 {
741 	unsigned long flags;
742 	struct rcu_dynticks *rdtp;
743 	long long oldval;
744 
745 	local_irq_save(flags);
746 	rdtp = this_cpu_ptr(&rcu_dynticks);
747 	oldval = rdtp->dynticks_nesting;
748 	rdtp->dynticks_nesting++;
749 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
750 	if (oldval)
751 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
752 	else
753 		rcu_eqs_exit_common(rdtp, oldval, true);
754 	rcu_sysidle_exit(rdtp, 1);
755 	local_irq_restore(flags);
756 }
757 
758 /**
759  * rcu_nmi_enter - inform RCU of entry to NMI context
760  *
761  * If the CPU was idle with dynamic ticks active, and there is no
762  * irq handler running, this updates rdtp->dynticks_nmi_nesting to let the
763  * RCU grace-period handling know that the CPU is active.
764  */
765 void rcu_nmi_enter(void)
766 {
767 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
768 
769 	if (rdtp->dynticks_nmi_nesting == 0 &&
770 	    (atomic_read(&rdtp->dynticks) & 0x1))
771 		return;
772 	rdtp->dynticks_nmi_nesting++;
773 	smp_mb__before_atomic();  /* Force delay from prior write. */
774 	atomic_inc(&rdtp->dynticks);
775 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
776 	smp_mb__after_atomic();  /* See above. */
777 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
778 }
779 
780 /**
781  * rcu_nmi_exit - inform RCU of exit from NMI context
782  *
783  * If the CPU was idle with dynamic ticks active, and there is no
784  * irq handler running, this updates rdtp->dynticks_nmi_nesting to let the
785  * RCU grace-period handling know that the CPU is no longer active.
786  */
787 void rcu_nmi_exit(void)
788 {
789 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
790 
791 	if (rdtp->dynticks_nmi_nesting == 0 ||
792 	    --rdtp->dynticks_nmi_nesting != 0)
793 		return;
794 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
795 	smp_mb__before_atomic();  /* See above. */
796 	atomic_inc(&rdtp->dynticks);
797 	smp_mb__after_atomic();  /* Force delay to next write. */
798 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
799 }
800 
801 /**
802  * __rcu_is_watching - are RCU read-side critical sections safe?
803  *
804  * Return true if RCU is watching the running CPU, which means that
805  * this CPU can safely enter RCU read-side critical sections.  Unlike
806  * rcu_is_watching(), the caller of __rcu_is_watching() must have at
807  * least disabled preemption.
808  */
809 bool notrace __rcu_is_watching(void)
810 {
811 	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
812 }
813 
814 /**
815  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
816  *
817  * If the current CPU is in its idle loop and is neither in an interrupt
818  * nor an NMI handler, return false; otherwise return true.
819  */
820 bool notrace rcu_is_watching(void)
821 {
822 	int ret;
823 
824 	preempt_disable();
825 	ret = __rcu_is_watching();
826 	preempt_enable();
827 	return ret;
828 }
829 EXPORT_SYMBOL_GPL(rcu_is_watching);
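/*
 * Typical usage sketch: code that can be reached from the idle loop
 * (for example, tracing or debugging hooks) checks this predicate
 * before entering an RCU read-side critical section:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		... access RCU-protected data ...
 *		rcu_read_unlock();
 *	}
 */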
830 
831 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
832 
833 /*
834  * Is the current CPU online?  Disable preemption to avoid false positives
835  * that could otherwise happen due to the current CPU number being sampled,
836  * this task being preempted, its old CPU being taken offline, resuming
837  * on some other CPU, then determining that its old CPU is now offline.
838  * It is OK to use RCU on an offline processor during initial boot, hence
839  * the check for rcu_scheduler_fully_active.  Note also that it is OK
840  * for a CPU coming online to use RCU for one jiffy prior to marking itself
841  * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
842  * offline to continue to use RCU for one jiffy after marking itself
843  * offline in the cpu_online_mask.  This leniency is necessary given the
844  * non-atomic nature of the online and offline processing, for example,
845  * the fact that a CPU enters the scheduler after completing the CPU_DYING
846  * notifiers.
847  *
848  * This is also why RCU internally marks CPUs online during the
849  * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
850  *
851  * Disable checking if in an NMI handler because we cannot safely report
852  * errors from NMI handlers anyway.
853  */
854 bool rcu_lockdep_current_cpu_online(void)
855 {
856 	struct rcu_data *rdp;
857 	struct rcu_node *rnp;
858 	bool ret;
859 
860 	if (in_nmi())
861 		return true;
862 	preempt_disable();
863 	rdp = this_cpu_ptr(&rcu_sched_data);
864 	rnp = rdp->mynode;
865 	ret = (rdp->grpmask & rnp->qsmaskinit) ||
866 	      !rcu_scheduler_fully_active;
867 	preempt_enable();
868 	return ret;
869 }
870 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
871 
872 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
873 
874 /**
875  * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
876  *
877  * If the current CPU is idle or running at a first-level (not nested)
878  * interrupt from idle, return true.  The caller must have at least
879  * disabled preemption.
880  */
881 static int rcu_is_cpu_rrupt_from_idle(void)
882 {
883 	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
884 }
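/*
 * Worked example for the test above: ->dynticks_nesting is zero while
 * in the idle loop proper, and rcu_irq_enter() increments it once per
 * interrupt level.  A value of 1 therefore means "idle, interrupted
 * exactly once", so the <= 1 comparison covers both the idle case and
 * the first-level-interrupt-from-idle case.
 */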
885 
886 /*
887  * Snapshot the specified CPU's dynticks counter so that we can later
888  * credit them with an implicit quiescent state.  Return 1 if this CPU
889  * is in dynticks idle mode, which is an extended quiescent state.
890  */
891 static int dyntick_save_progress_counter(struct rcu_data *rdp,
892 					 bool *isidle, unsigned long *maxj)
893 {
894 	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
895 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
896 	if ((rdp->dynticks_snap & 0x1) == 0) {
897 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
898 		return 1;
899 	} else {
900 		return 0;
901 	}
902 }
903 
904 /*
905  * This function really isn't for public consumption, but RCU is special in
906  * that context switches can allow the state machine to make progress.
907  */
908 extern void resched_cpu(int cpu);
909 
910 /*
911  * Return true if the specified CPU has passed through a quiescent
912  * state by virtue of being in or having passed through a dynticks
913  * idle state since the last call to dyntick_save_progress_counter()
914  * for this same CPU, or by virtue of having been offline.
915  */
916 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
917 				    bool *isidle, unsigned long *maxj)
918 {
919 	unsigned int curr;
920 	int *rcrmp;
921 	unsigned int snap;
922 
923 	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
924 	snap = (unsigned int)rdp->dynticks_snap;
925 
926 	/*
927 	 * If the CPU passed through or entered a dynticks idle phase with
928 	 * no active irq/NMI handlers, then we can safely pretend that the CPU
929 	 * already acknowledged the request to pass through a quiescent
930 	 * state.  Either way, that CPU cannot possibly be in an RCU
931 	 * read-side critical section that started before the beginning
932 	 * of the current RCU grace period.
933 	 */
934 	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
935 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
936 		rdp->dynticks_fqs++;
937 		return 1;
938 	}
939 
940 	/*
941 	 * Check for the CPU being offline, but only if the grace period
942 	 * is old enough.  We don't need to worry about the CPU changing
943 	 * state: If we see it offline even once, it has been through a
944 	 * quiescent state.
945 	 *
946 	 * The reason for insisting that the grace period be at least
947 	 * one jiffy old is that CPUs that are not quite online and that
948 	 * have just gone offline can still execute RCU read-side critical
949 	 * sections.
950 	 */
951 	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
952 		return 0;  /* Grace period is not old enough. */
953 	barrier();
954 	if (cpu_is_offline(rdp->cpu)) {
955 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
956 		rdp->offline_fqs++;
957 		return 1;
958 	}
959 
960 	/*
961 	 * A CPU running for an extended time within the kernel can
962 	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
963 	 * even context-switching back and forth between a pair of
964 	 * in-kernel CPU-bound tasks cannot advance grace periods.
965 	 * So if the grace period is old enough, make the CPU pay attention.
966 	 * Note that the unsynchronized assignments to the per-CPU
967 	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
968 	 * bits can be lost, but they will be set again on the next
969 	 * force-quiescent-state pass.  So lost bit sets do not result
970 	 * in incorrect behavior, merely in a grace period lasting
971 	 * a few jiffies longer than it might otherwise.  Because
972 	 * there are at most four threads involved, and because the
973 	 * updates are only once every few jiffies, the probability of
974 	 * lossage (and thus of slight grace-period extension) is
975 	 * quite low.
976 	 *
977 	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
978 	 * is set too high, we override with half of the RCU CPU stall
979 	 * warning delay.
980 	 */
981 	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
982 	if (ULONG_CMP_GE(jiffies,
983 			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
984 	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
985 		if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
986 			ACCESS_ONCE(rdp->cond_resched_completed) =
987 				ACCESS_ONCE(rdp->mynode->completed);
988 			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
989 			ACCESS_ONCE(*rcrmp) =
990 				ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
991 			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
992 			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
993 		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
994 			/* Time to beat on that CPU again! */
995 			resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
996 			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
997 		}
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 static void record_gp_stall_check_time(struct rcu_state *rsp)
1004 {
1005 	unsigned long j = jiffies;
1006 	unsigned long j1;
1007 
1008 	rsp->gp_start = j;
1009 	smp_wmb(); /* Record start time before stall time. */
1010 	j1 = rcu_jiffies_till_stall_check();
1011 	ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
1012 	rsp->jiffies_resched = j + j1 / 2;
1013 }
1014 
1015 /*
1016  * Dump stacks of all tasks running on stalled CPUs.
1017  */
1018 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1019 {
1020 	int cpu;
1021 	unsigned long flags;
1022 	struct rcu_node *rnp;
1023 
1024 	rcu_for_each_leaf_node(rsp, rnp) {
1025 		raw_spin_lock_irqsave(&rnp->lock, flags);
1026 		if (rnp->qsmask != 0) {
1027 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1028 				if (rnp->qsmask & (1UL << cpu))
1029 					dump_cpu_task(rnp->grplo + cpu);
1030 		}
1031 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1032 	}
1033 }
1034 
1035 static void print_other_cpu_stall(struct rcu_state *rsp)
1036 {
1037 	int cpu;
1038 	long delta;
1039 	unsigned long flags;
1040 	int ndetected = 0;
1041 	struct rcu_node *rnp = rcu_get_root(rsp);
1042 	long totqlen = 0;
1043 
1044 	/* Only let one CPU complain about others per time interval. */
1045 
1046 	raw_spin_lock_irqsave(&rnp->lock, flags);
1047 	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
1048 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1049 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1050 		return;
1051 	}
1052 	ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
1053 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1054 
1055 	/*
1056 	 * OK, time to rat on our buddy...
1057 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
1058 	 * RCU CPU stall warnings.
1059 	 */
1060 	pr_err("INFO: %s detected stalls on CPUs/tasks:",
1061 	       rsp->name);
1062 	print_cpu_stall_info_begin();
1063 	rcu_for_each_leaf_node(rsp, rnp) {
1064 		raw_spin_lock_irqsave(&rnp->lock, flags);
1065 		ndetected += rcu_print_task_stall(rnp);
1066 		if (rnp->qsmask != 0) {
1067 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1068 				if (rnp->qsmask & (1UL << cpu)) {
1069 					print_cpu_stall_info(rsp,
1070 							     rnp->grplo + cpu);
1071 					ndetected++;
1072 				}
1073 		}
1074 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1075 	}
1076 
1077 	/*
1078 	 * Now rat on any tasks that got kicked up to the root rcu_node
1079 	 * due to CPU offlining.
1080 	 */
1081 	rnp = rcu_get_root(rsp);
1082 	raw_spin_lock_irqsave(&rnp->lock, flags);
1083 	ndetected += rcu_print_task_stall(rnp);
1084 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1085 
1086 	print_cpu_stall_info_end();
1087 	for_each_possible_cpu(cpu)
1088 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1089 	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1090 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
1091 	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
1092 	if (ndetected == 0)
1093 		pr_err("INFO: Stall ended before state dump start\n");
1094 	else
1095 		rcu_dump_cpu_stacks(rsp);
1096 
1097 	/* Complain about tasks blocking the grace period. */
1098 
1099 	rcu_print_detail_task_stall(rsp);
1100 
1101 	force_quiescent_state(rsp);  /* Kick them all. */
1102 }
1103 
1104 static void print_cpu_stall(struct rcu_state *rsp)
1105 {
1106 	int cpu;
1107 	unsigned long flags;
1108 	struct rcu_node *rnp = rcu_get_root(rsp);
1109 	long totqlen = 0;
1110 
1111 	/*
1112 	 * OK, time to rat on ourselves...
1113 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
1114 	 * RCU CPU stall warnings.
1115 	 */
1116 	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1117 	print_cpu_stall_info_begin();
1118 	print_cpu_stall_info(rsp, smp_processor_id());
1119 	print_cpu_stall_info_end();
1120 	for_each_possible_cpu(cpu)
1121 		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1122 	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1123 		jiffies - rsp->gp_start,
1124 		(long)rsp->gpnum, (long)rsp->completed, totqlen);
1125 	rcu_dump_cpu_stacks(rsp);
1126 
1127 	raw_spin_lock_irqsave(&rnp->lock, flags);
1128 	if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
1129 		ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
1130 				     3 * rcu_jiffies_till_stall_check() + 3;
1131 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1132 
1133 	/*
1134 	 * Attempt to revive the RCU machinery by forcing a context switch.
1135 	 *
1136 	 * A context switch would normally allow the RCU state machine to make
1137 	 * progress and it could be we're stuck in kernel space without context
1138 	 * switches for an entirely unreasonable amount of time.
1139 	 */
1140 	resched_cpu(smp_processor_id());
1141 }
1142 
1143 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1144 {
1145 	unsigned long completed;
1146 	unsigned long gpnum;
1147 	unsigned long gps;
1148 	unsigned long j;
1149 	unsigned long js;
1150 	struct rcu_node *rnp;
1151 
1152 	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
1153 		return;
1154 	j = jiffies;
1155 
1156 	/*
1157 	 * Lots of memory barriers to reject false positives.
1158 	 *
1159 	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1160 	 * then rsp->gp_start, and finally rsp->completed.  These values
1161 	 * are updated in the opposite order with memory barriers (or
1162 	 * equivalent) during grace-period initialization and cleanup.
1163  * Now, a false positive can occur if we get a new value of
1164  * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
1165 	 * the memory barriers, the only way that this can happen is if one
1166 	 * grace period ends and another starts between these two fetches.
1167 	 * Detect this by comparing rsp->completed with the previous fetch
1168 	 * from rsp->gpnum.
1169 	 *
1170 	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
1171 	 * and rsp->gp_start suffice to forestall false positives.
1172 	 */
1173 	gpnum = ACCESS_ONCE(rsp->gpnum);
1174 	smp_rmb(); /* Pick up ->gpnum first... */
1175 	js = ACCESS_ONCE(rsp->jiffies_stall);
1176 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
1177 	gps = ACCESS_ONCE(rsp->gp_start);
1178 	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
1179 	completed = ACCESS_ONCE(rsp->completed);
1180 	if (ULONG_CMP_GE(completed, gpnum) ||
1181 	    ULONG_CMP_LT(j, js) ||
1182 	    ULONG_CMP_GE(gps, js))
1183 		return; /* No stall or GP completed since entering function. */
1184 	rnp = rdp->mynode;
1185 	if (rcu_gp_in_progress(rsp) &&
1186 	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
1187 
1188 		/* We haven't checked in, so go dump stack. */
1189 		print_cpu_stall(rsp);
1190 
1191 	} else if (rcu_gp_in_progress(rsp) &&
1192 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1193 
1194 		/* They had a few time units to dump stack, so complain. */
1195 		print_other_cpu_stall(rsp);
1196 	}
1197 }
1198 
1199 /**
1200  * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
1201  *
1202  * Set the stall-warning timeout way off into the future, thus preventing
1203  * any RCU CPU stall-warning messages from appearing in the current set of
1204  * RCU grace periods.
1205  *
1206  * The caller must disable hard irqs.
1207  */
1208 void rcu_cpu_stall_reset(void)
1209 {
1210 	struct rcu_state *rsp;
1211 
1212 	for_each_rcu_flavor(rsp)
1213 		ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
1214 }
1215 
1216 /*
1217  * Initialize the specified rcu_data structure's callback list to empty.
1218  */
1219 static void init_callback_list(struct rcu_data *rdp)
1220 {
1221 	int i;
1222 
1223 	if (init_nocb_callback_list(rdp))
1224 		return;
1225 	rdp->nxtlist = NULL;
1226 	for (i = 0; i < RCU_NEXT_SIZE; i++)
1227 		rdp->nxttail[i] = &rdp->nxtlist;
1228 }
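/*
 * For reference, a sketch of the segmented callback list just
 * initialized, with callbacks ordered oldest to newest:
 *
 *	->nxtlist --> [DONE cbs] -> [WAIT cbs] -> [NEXT_READY cbs] -> [NEXT cbs]
 *
 * Each ->nxttail[i] points to the ->next pointer of the last callback
 * in sublist i (or to ->nxtlist itself while everything is empty, as
 * here), and ->nxtcompleted[i] records the ->completed value that must
 * be reached before sublist i's callbacks may be invoked.
 */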
1229 
1230 /*
1231  * Determine the value that ->completed will have at the end of the
1232  * next subsequent grace period.  This is used to tag callbacks so that
1233  * a CPU can invoke callbacks in a timely fashion even if that CPU has
1234  * been dyntick-idle for an extended period with callbacks under the
1235  * influence of RCU_FAST_NO_HZ.
1236  *
1237  * The caller must hold rnp->lock with interrupts disabled.
1238  */
1239 static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1240 				       struct rcu_node *rnp)
1241 {
1242 	/*
1243 	 * If RCU is idle, we just wait for the next grace period.
1244 	 * But we can only be sure that RCU is idle if we are looking
1245 	 * at the root rcu_node structure -- otherwise, a new grace
1246 	 * period might have started, but just not yet gotten around
1247 	 * to initializing the current non-root rcu_node structure.
1248 	 */
1249 	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1250 		return rnp->completed + 1;
1251 
1252 	/*
1253 	 * Otherwise, wait for a possible partial grace period and
1254 	 * then the subsequent full grace period.
1255 	 */
1256 	return rnp->completed + 2;
1257 }
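/*
 * Worked example: if this is the root rcu_node and it shows
 * ->gpnum == ->completed == 100, RCU is idle, the next grace period
 * will be number 101, and callbacks registered now are guaranteed a
 * full grace period by its end, so return 101.  From a non-root
 * rcu_node (or with a grace period possibly in flight), grace period
 * 101 might already be running and unable to account for these
 * callbacks, so they must instead wait for grace period 102.
 */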
1258 
1259 /*
1260  * Trace-event helper function for rcu_start_future_gp() and
1261  * rcu_nocb_wait_gp().
1262  */
1263 static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1264 				unsigned long c, const char *s)
1265 {
1266 	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1267 				      rnp->completed, c, rnp->level,
1268 				      rnp->grplo, rnp->grphi, s);
1269 }
1270 
1271 /*
1272  * Start some future grace period, as needed to handle newly arrived
1273  * callbacks.  The required future grace periods are recorded in each
1274  * rcu_node structure's ->need_future_gp field.  Returns true if there
1275  * is reason to awaken the grace-period kthread.
1276  *
1277  * The caller must hold the specified rcu_node structure's ->lock.
1278  */
1279 static bool __maybe_unused
1280 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1281 		    unsigned long *c_out)
1282 {
1283 	unsigned long c;
1284 	int i;
1285 	bool ret = false;
1286 	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1287 
1288 	/*
1289 	 * Pick up grace-period number for new callbacks.  If this
1290 	 * grace period is already marked as needed, return to the caller.
1291 	 */
1292 	c = rcu_cbs_completed(rdp->rsp, rnp);
1293 	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1294 	if (rnp->need_future_gp[c & 0x1]) {
1295 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1296 		goto out;
1297 	}
1298 
1299 	/*
1300 	 * If either this rcu_node structure or the root rcu_node structure
1301 	 * believe that a grace period is in progress, then we must wait
1302 	 * for the one following, which is in "c".  Because our request
1303 	 * will be noticed at the end of the current grace period, we don't
1304 	 * need to explicitly start one.  We only do the lockless check
1305 	 * of rnp_root's fields if the current rcu_node structure thinks
1306 	 * there is no grace period in flight, and because we hold rnp->lock,
1307 	 * the only possible change is when rnp_root's two fields are
1308 	 * equal, in which case rnp_root->gpnum might be concurrently
1309 	 * incremented.  But that is OK, as it will just result in our
1310 	 * doing some extra useless work.
1311 	 */
1312 	if (rnp->gpnum != rnp->completed ||
1313 	    ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
1314 		rnp->need_future_gp[c & 0x1]++;
1315 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1316 		goto out;
1317 	}
1318 
1319 	/*
1320 	 * There might be no grace period in progress.  If we don't already
1321 	 * hold it, acquire the root rcu_node structure's lock in order to
1322 	 * start one (if needed).
1323 	 */
1324 	if (rnp != rnp_root) {
1325 		raw_spin_lock(&rnp_root->lock);
1326 		smp_mb__after_unlock_lock();
1327 	}
1328 
1329 	/*
1330 	 * Get a new grace-period number.  If there really is no grace
1331 	 * period in progress, it will be smaller than the one we obtained
1332 	 * earlier.  Adjust callbacks as needed.  Note that even no-CBs
1333 	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1334 	 */
1335 	c = rcu_cbs_completed(rdp->rsp, rnp_root);
1336 	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1337 		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1338 			rdp->nxtcompleted[i] = c;
1339 
1340 	/*
1341 	 * If the need for the required grace period is already
1342 	 * recorded, trace and leave.
1343 	 */
1344 	if (rnp_root->need_future_gp[c & 0x1]) {
1345 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1346 		goto unlock_out;
1347 	}
1348 
1349 	/* Record the need for the future grace period. */
1350 	rnp_root->need_future_gp[c & 0x1]++;
1351 
1352 	/* If a grace period is not already in progress, start one. */
1353 	if (rnp_root->gpnum != rnp_root->completed) {
1354 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1355 	} else {
1356 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1357 		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1358 	}
1359 unlock_out:
1360 	if (rnp != rnp_root)
1361 		raw_spin_unlock(&rnp_root->lock);
1362 out:
1363 	if (c_out != NULL)
1364 		*c_out = c;
1365 	return ret;
1366 }
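/*
 * Note on the need_future_gp[c & 0x1] indexing used above: the array
 * has only two slots, indexed by the low-order bit of the grace-period
 * number.  Because rcu_cbs_completed() returns either ->completed + 1
 * or ->completed + 2, requests can be outstanding for at most two
 * consecutive grace periods, so the two slots never collide, and each
 * slot is zeroed as its grace period completes in
 * rcu_future_gp_cleanup() below.
 */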
1367 
1368 /*
1369  * Clean up any old requests for the just-ended grace period, report
1370  * whether any additional grace periods have been requested, and invoke
1371  * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1372  * waiting for this grace period to complete.
1373  */
1374 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1375 {
1376 	int c = rnp->completed;
1377 	int needmore;
1378 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1379 
1380 	rcu_nocb_gp_cleanup(rsp, rnp);
1381 	rnp->need_future_gp[c & 0x1] = 0;
1382 	needmore = rnp->need_future_gp[(c + 1) & 0x1];
1383 	trace_rcu_future_gp(rnp, rdp, c,
1384 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1385 	return needmore;
1386 }
1387 
1388 /*
1389  * Awaken the grace-period kthread for the specified flavor of RCU.
1390  * Don't do a self-awaken, and don't bother awakening when there is
1391  * nothing for the grace-period kthread to do (as in several CPUs
1392  * raced to awaken, and we lost), and finally don't try to awaken
1393  * a kthread that has not yet been created.
1394  */
1395 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1396 {
1397 	if (current == rsp->gp_kthread ||
1398 	    !ACCESS_ONCE(rsp->gp_flags) ||
1399 	    !rsp->gp_kthread)
1400 		return;
1401 	wake_up(&rsp->gp_wq);
1402 }
1403 
1404 /*
1405  * If there is room, assign a ->completed number to any callbacks on
1406  * this CPU that have not already been assigned.  Also accelerate any
1407  * callbacks that were previously assigned a ->completed number that has
1408  * since proven to be too conservative, which can happen if callbacks get
1409  * assigned a ->completed number while RCU is idle, but with reference to
1410  * a non-root rcu_node structure.  This function is idempotent, so it does
1411  * not hurt to call it repeatedly.  Returns a flag saying whether we should
1412  * awaken the RCU grace-period kthread.
1413  *
1414  * The caller must hold rnp->lock with interrupts disabled.
1415  */
1416 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1417 			       struct rcu_data *rdp)
1418 {
1419 	unsigned long c;
1420 	int i;
1421 	bool ret;
1422 
1423 	/* If the CPU has no callbacks, nothing to do. */
1424 	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1425 		return false;
1426 
1427 	/*
1428 	 * Starting from the sublist containing the callbacks most
1429 	 * recently assigned a ->completed number and working down, find the
1430 	 * first sublist that is not assignable to an upcoming grace period.
1431 	 * Such a sublist has something in it (first two tests) and has
1432 	 * a ->completed number assigned that will complete sooner than
1433 	 * the ->completed number for newly arrived callbacks (last test).
1434 	 *
1435 	 * The key point is that any later sublist can be assigned the
1436 	 * same ->completed number as the newly arrived callbacks, which
1437 	 * means that the callbacks in any of these later sublists can be
1438 	 * grouped into a single sublist, whether or not they have already
1439 	 * been assigned a ->completed number.
1440 	 */
1441 	c = rcu_cbs_completed(rsp, rnp);
1442 	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1443 		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1444 		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1445 			break;
1446 
1447 	/*
1448 	 * If there is no sublist for unassigned callbacks, leave.
1449 	 * At the same time, advance "i" one sublist, so that "i" will
1450 	 * index the sublist into which all the remaining callbacks should
1451 	 * be grouped.
1452 	 */
1453 	if (++i >= RCU_NEXT_TAIL)
1454 		return false;
1455 
1456 	/*
1457 	 * Assign all subsequent callbacks' ->completed number to the next
1458 	 * full grace period and group them all in the sublist initially
1459 	 * indexed by "i".
1460 	 */
1461 	for (; i <= RCU_NEXT_TAIL; i++) {
1462 		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1463 		rdp->nxtcompleted[i] = c;
1464 	}
1465 	/* Record any needed additional grace periods. */
1466 	ret = rcu_start_future_gp(rnp, rdp, NULL);
1467 
1468 	/* Trace depending on how much we were able to accelerate. */
1469 	if (!*rdp->nxttail[RCU_WAIT_TAIL])
1470 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1471 	else
1472 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1473 	return ret;
1474 }
1475 
1476 /*
1477  * Move any callbacks whose grace period has completed to the
1478  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1479  * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1480  * sublist.  This function is idempotent, so it does not hurt to
1481  * invoke it repeatedly.  As long as it is not invoked -too- often...
1482  * Returns true if the RCU grace-period kthread needs to be awakened.
1483  *
1484  * The caller must hold rnp->lock with interrupts disabled.
1485  */
1486 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1487 			    struct rcu_data *rdp)
1488 {
1489 	int i, j;
1490 
1491 	/* If the CPU has no callbacks, nothing to do. */
1492 	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1493 		return false;
1494 
1495 	/*
1496 	 * Find all callbacks whose ->completed numbers indicate that they
1497 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1498 	 */
1499 	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1500 		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1501 			break;
1502 		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1503 	}
1504 	/* Clean up any sublist tail pointers that were misordered above. */
1505 	for (j = RCU_WAIT_TAIL; j < i; j++)
1506 		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1507 
1508 	/* Copy down callbacks to fill in empty sublists. */
1509 	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1510 		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1511 			break;
1512 		rdp->nxttail[j] = rdp->nxttail[i];
1513 		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1514 	}
1515 
1516 	/* Classify any remaining callbacks. */
1517 	return rcu_accelerate_cbs(rsp, rnp, rdp);
1518 }
1519 
1520 /*
1521  * Update CPU-local rcu_data state to record the beginnings and ends of
1522  * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1523  * structure corresponding to the current CPU, and must have irqs disabled.
1524  * Returns true if the grace-period kthread needs to be awakened.
1525  */
1526 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1527 			      struct rcu_data *rdp)
1528 {
1529 	bool ret;
1530 
1531 	/* Handle the ends of any preceding grace periods first. */
1532 	if (rdp->completed == rnp->completed) {
1533 
1534 		/* No grace period end, so just accelerate recent callbacks. */
1535 		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1536 
1537 	} else {
1538 
1539 		/* Advance callbacks. */
1540 		ret = rcu_advance_cbs(rsp, rnp, rdp);
1541 
1542 		/* Remember that we saw this grace-period completion. */
1543 		rdp->completed = rnp->completed;
1544 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1545 	}
1546 
1547 	if (rdp->gpnum != rnp->gpnum) {
1548 		/*
1549 		 * If the current grace period is waiting for this CPU,
1550 		 * set up to detect a quiescent state, otherwise don't
1551 		 * go looking for one.
1552 		 */
1553 		rdp->gpnum = rnp->gpnum;
1554 		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1555 		rdp->passed_quiesce = 0;
1556 		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1557 		zero_cpu_stall_ticks(rdp);
1558 	}
1559 	return ret;
1560 }
1561 
1562 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1563 {
1564 	unsigned long flags;
1565 	bool needwake;
1566 	struct rcu_node *rnp;
1567 
1568 	local_irq_save(flags);
1569 	rnp = rdp->mynode;
1570 	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1571 	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
1572 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1573 		local_irq_restore(flags);
1574 		return;
1575 	}
1576 	smp_mb__after_unlock_lock();
1577 	needwake = __note_gp_changes(rsp, rnp, rdp);
1578 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1579 	if (needwake)
1580 		rcu_gp_kthread_wake(rsp);
1581 }
1582 
1583 /*
1584  * Initialize a new grace period.  Return 0 if no grace period required.
1585  */
1586 static int rcu_gp_init(struct rcu_state *rsp)
1587 {
1588 	struct rcu_data *rdp;
1589 	struct rcu_node *rnp = rcu_get_root(rsp);
1590 
1591 	rcu_bind_gp_kthread();
1592 	raw_spin_lock_irq(&rnp->lock);
1593 	smp_mb__after_unlock_lock();
1594 	if (!ACCESS_ONCE(rsp->gp_flags)) {
1595 		/* Spurious wakeup, tell caller to go back to sleep.  */
1596 		raw_spin_unlock_irq(&rnp->lock);
1597 		return 0;
1598 	}
1599 	ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
1600 
1601 	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1602 		/*
1603 		 * Grace period already in progress, don't start another.
1604 		 * Not supposed to be able to happen.
1605 		 */
1606 		raw_spin_unlock_irq(&rnp->lock);
1607 		return 0;
1608 	}
1609 
1610 	/* Advance to a new grace period and initialize state. */
1611 	record_gp_stall_check_time(rsp);
1612 	/* Record GP times before starting GP, hence smp_store_release(). */
1613 	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1614 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1615 	raw_spin_unlock_irq(&rnp->lock);
1616 
1617 	/* Exclude any concurrent CPU-hotplug operations. */
1618 	mutex_lock(&rsp->onoff_mutex);
1619 	smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
1620 
1621 	/*
1622 	 * Set the quiescent-state-needed bits in all the rcu_node
1623 	 * structures for all currently online CPUs in breadth-first order,
1624 	 * starting from the root rcu_node structure, relying on the layout
1625 	 * of the tree within the rsp->node[] array.  Note that other CPUs
1626 	 * will access only the leaves of the hierarchy, thus seeing that no
1627 	 * grace period is in progress, at least until the corresponding
1628 	 * leaf node has been initialized.  In addition, we have excluded
1629 	 * CPU-hotplug operations.
1630 	 *
1631 	 * The grace period cannot complete until the initialization
1632 	 * process finishes, because this kthread handles both.
1633 	 */
1634 	rcu_for_each_node_breadth_first(rsp, rnp) {
1635 		raw_spin_lock_irq(&rnp->lock);
1636 		smp_mb__after_unlock_lock();
1637 		rdp = this_cpu_ptr(rsp->rda);
1638 		rcu_preempt_check_blocked_tasks(rnp);
1639 		rnp->qsmask = rnp->qsmaskinit;
1640 		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
1641 		WARN_ON_ONCE(rnp->completed != rsp->completed);
1642 		ACCESS_ONCE(rnp->completed) = rsp->completed;
1643 		if (rnp == rdp->mynode)
1644 			(void)__note_gp_changes(rsp, rnp, rdp);
1645 		rcu_preempt_boost_start_gp(rnp);
1646 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1647 					    rnp->level, rnp->grplo,
1648 					    rnp->grphi, rnp->qsmask);
1649 		raw_spin_unlock_irq(&rnp->lock);
1650 		cond_resched();
1651 	}
1652 
1653 	mutex_unlock(&rsp->onoff_mutex);
1654 	return 1;
1655 }
1656 
1657 /*
1658  * Do one round of quiescent-state forcing.
1659  */
1660 static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1661 {
1662 	int fqs_state = fqs_state_in;
1663 	bool isidle = false;
1664 	unsigned long maxj;
1665 	struct rcu_node *rnp = rcu_get_root(rsp);
1666 
1667 	rsp->n_force_qs++;
1668 	if (fqs_state == RCU_SAVE_DYNTICK) {
1669 		/* Collect dyntick-idle snapshots. */
1670 		if (is_sysidle_rcu_state(rsp)) {
1671 			isidle = 1;
1672 			maxj = jiffies - ULONG_MAX / 4;
1673 		}
1674 		force_qs_rnp(rsp, dyntick_save_progress_counter,
1675 			     &isidle, &maxj);
1676 		rcu_sysidle_report_gp(rsp, isidle, maxj);
1677 		fqs_state = RCU_FORCE_QS;
1678 	} else {
1679 		/* Handle dyntick-idle and offline CPUs. */
1680 		isidle = 0;
1681 		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
1682 	}
1683 	/* Clear flag to prevent immediate re-entry. */
1684 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
1685 		raw_spin_lock_irq(&rnp->lock);
1686 		smp_mb__after_unlock_lock();
1687 		ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
1688 		raw_spin_unlock_irq(&rnp->lock);
1689 	}
1690 	return fqs_state;
1691 }
1692 
1693 /*
1694  * Clean up after the old grace period.
1695  */
1696 static void rcu_gp_cleanup(struct rcu_state *rsp)
1697 {
1698 	unsigned long gp_duration;
1699 	bool needgp = false;
1700 	int nocb = 0;
1701 	struct rcu_data *rdp;
1702 	struct rcu_node *rnp = rcu_get_root(rsp);
1703 
1704 	raw_spin_lock_irq(&rnp->lock);
1705 	smp_mb__after_unlock_lock();
1706 	gp_duration = jiffies - rsp->gp_start;
1707 	if (gp_duration > rsp->gp_max)
1708 		rsp->gp_max = gp_duration;
1709 
1710 	/*
1711 	 * We know the grace period is complete, but to everyone else
1712 	 * it appears to still be ongoing.  However, it is also the case
1713 	 * that no one else can do anything to advance the grace period
1714 	 * either.  It is therefore safe for us to drop the lock in
1715 	 * order to mark the grace period as completed in all of the
1716 	 * rcu_node structures.
1717 	 */
1718 	raw_spin_unlock_irq(&rnp->lock);
1719 
1720 	/*
1721 	 * Propagate new ->completed value to rcu_node structures so
1722 	 * that other CPUs don't have to wait until the start of the next
1723 	 * grace period to process their callbacks.  This also avoids
1724 	 * some nasty RCU grace-period initialization races by forcing
1725 	 * the end of the current grace period to be completely recorded in
1726 	 * all of the rcu_node structures before the beginning of the next
1727 	 * grace period is recorded in any of the rcu_node structures.
1728 	 */
1729 	rcu_for_each_node_breadth_first(rsp, rnp) {
1730 		raw_spin_lock_irq(&rnp->lock);
1731 		smp_mb__after_unlock_lock();
1732 		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
1733 		rdp = this_cpu_ptr(rsp->rda);
1734 		if (rnp == rdp->mynode)
1735 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
1736 		/* smp_mb() provided by prior unlock-lock pair. */
1737 		nocb += rcu_future_gp_cleanup(rsp, rnp);
1738 		raw_spin_unlock_irq(&rnp->lock);
1739 		cond_resched();
1740 	}
1741 	rnp = rcu_get_root(rsp);
1742 	raw_spin_lock_irq(&rnp->lock);
1743 	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
1744 	rcu_nocb_gp_set(rnp, nocb);
1745 
1746 	/* Declare grace period done. */
1747 	ACCESS_ONCE(rsp->completed) = rsp->gpnum;
1748 	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
1749 	rsp->fqs_state = RCU_GP_IDLE;
1750 	rdp = this_cpu_ptr(rsp->rda);
1751 	/* Advance CBs to reduce false positives below. */
1752 	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
1753 	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
1754 		ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
1755 		trace_rcu_grace_period(rsp->name,
1756 				       ACCESS_ONCE(rsp->gpnum),
1757 				       TPS("newreq"));
1758 	}
1759 	raw_spin_unlock_irq(&rnp->lock);
1760 }
1761 
1762 /*
1763  * Body of kthread that handles grace periods.
1764  */
1765 static int __noreturn rcu_gp_kthread(void *arg)
1766 {
1767 	int fqs_state;
1768 	int gf;
1769 	unsigned long j;
1770 	int ret;
1771 	struct rcu_state *rsp = arg;
1772 	struct rcu_node *rnp = rcu_get_root(rsp);
1773 
1774 	for (;;) {
1775 
1776 		/* Handle grace-period start. */
1777 		for (;;) {
1778 			trace_rcu_grace_period(rsp->name,
1779 					       ACCESS_ONCE(rsp->gpnum),
1780 					       TPS("reqwait"));
1781 			rsp->gp_state = RCU_GP_WAIT_GPS;
1782 			wait_event_interruptible(rsp->gp_wq,
1783 						 ACCESS_ONCE(rsp->gp_flags) &
1784 						 RCU_GP_FLAG_INIT);
1785 			/* Locking provides needed memory barrier. */
1786 			if (rcu_gp_init(rsp))
1787 				break;
1788 			cond_resched();
1789 			flush_signals(current);
1790 			trace_rcu_grace_period(rsp->name,
1791 					       ACCESS_ONCE(rsp->gpnum),
1792 					       TPS("reqwaitsig"));
1793 		}
1794 
1795 		/* Handle quiescent-state forcing. */
1796 		fqs_state = RCU_SAVE_DYNTICK;
1797 		j = jiffies_till_first_fqs;
1798 		if (j > HZ) {
1799 			j = HZ;
1800 			jiffies_till_first_fqs = HZ;
1801 		}
1802 		ret = 0;
1803 		for (;;) {
1804 			if (!ret)
1805 				rsp->jiffies_force_qs = jiffies + j;
1806 			trace_rcu_grace_period(rsp->name,
1807 					       ACCESS_ONCE(rsp->gpnum),
1808 					       TPS("fqswait"));
1809 			rsp->gp_state = RCU_GP_WAIT_FQS;
1810 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
1811 					((gf = ACCESS_ONCE(rsp->gp_flags)) &
1812 					 RCU_GP_FLAG_FQS) ||
1813 					(!ACCESS_ONCE(rnp->qsmask) &&
1814 					 !rcu_preempt_blocked_readers_cgp(rnp)),
1815 					j);
1816 			/* Locking provides needed memory barriers. */
1817 			/* If grace period done, leave loop. */
1818 			if (!ACCESS_ONCE(rnp->qsmask) &&
1819 			    !rcu_preempt_blocked_readers_cgp(rnp))
1820 				break;
1821 			/* If time for quiescent-state forcing, do it. */
1822 			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
1823 			    (gf & RCU_GP_FLAG_FQS)) {
1824 				trace_rcu_grace_period(rsp->name,
1825 						       ACCESS_ONCE(rsp->gpnum),
1826 						       TPS("fqsstart"));
1827 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
1828 				trace_rcu_grace_period(rsp->name,
1829 						       ACCESS_ONCE(rsp->gpnum),
1830 						       TPS("fqsend"));
1831 				cond_resched();
1832 			} else {
1833 				/* Deal with stray signal. */
1834 				cond_resched();
1835 				flush_signals(current);
1836 				trace_rcu_grace_period(rsp->name,
1837 						       ACCESS_ONCE(rsp->gpnum),
1838 						       TPS("fqswaitsig"));
1839 			}
1840 			j = jiffies_till_next_fqs;
1841 			if (j > HZ) {
1842 				j = HZ;
1843 				jiffies_till_next_fqs = HZ;
1844 			} else if (j < 1) {
1845 				j = 1;
1846 				jiffies_till_next_fqs = 1;
1847 			}
1848 		}
1849 
1850 		/* Handle grace-period end. */
1851 		rcu_gp_cleanup(rsp);
1852 	}
1853 }
1854 
1855 /*
1856  * Start a new RCU grace period if warranted, re-initializing the hierarchy
1857  * in preparation for detecting the next grace period.  The caller must hold
1858  * the root node's ->lock and hard irqs must be disabled.
1859  *
1860  * Note that it is legal for a dying CPU (which is marked as offline) to
1861  * invoke this function.  This can happen when the dying CPU reports its
1862  * quiescent state.
1863  *
1864  * Returns true if the grace-period kthread must be awakened.
1865  */
1866 static bool
1867 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
1868 		      struct rcu_data *rdp)
1869 {
1870 	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
1871 		/*
1872 		 * Either we have not yet spawned the grace-period
1873 		 * task, this CPU does not need another grace period,
1874 		 * or a grace period is already in progress.
1875 		 * Either way, don't start a new grace period.
1876 		 */
1877 		return false;
1878 	}
1879 	ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
1880 	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
1881 			       TPS("newreq"));
1882 
1883 	/*
1884 	 * We can't do wakeups while holding the rnp->lock, as that
1885 	 * could cause deadlocks with the rq->lock.  Defer
1886 	 * the wakeup to our caller.
1887 	 */
1888 	return true;
1889 }
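/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  A typical caller honors the deferred-wakeup rule above by
 * dropping rnp->lock before doing the wakeup:
 *
 *	needwake = rcu_start_gp_advanced(rsp, rnp, rdp);
 *	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 *	if (needwake)
 *		rcu_gp_kthread_wake(rsp);
 *
 * This is the pattern used by note_gp_changes() and
 * __rcu_process_callbacks() elsewhere in this file.
 */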
1890 
1891 /*
1892  * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
1893  * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
1894  * is invoked indirectly from rcu_advance_cbs(), which would result in
1895  * endless recursion -- or would do so if it wasn't for the self-deadlock
1896  * endless recursion -- or would do so were it not for the self-deadlock
1897  *
1898  * Returns true if the grace-period kthread needs to be awakened.
1899  */
1900 static bool rcu_start_gp(struct rcu_state *rsp)
1901 {
1902 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1903 	struct rcu_node *rnp = rcu_get_root(rsp);
1904 	bool ret = false;
1905 
1906 	/*
1907 	 * If there is no grace period in progress right now, any
1908 	 * callbacks we have up to this point will be satisfied by the
1909 	 * next grace period.  Also, advancing the callbacks reduces the
1910 	 * probability of false positives from cpu_needs_another_gp()
1911 	 * resulting in pointless grace periods.  So, advance callbacks
1912 	 * then start the grace period!
1913 	 */
1914 	ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
1915 	ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
1916 	return ret;
1917 }
1918 
1919 /*
1920  * Report a full set of quiescent states to the specified rcu_state
1921  * data structure.  This involves cleaning up after the prior grace
1922  * period and letting rcu_start_gp() start up the next grace period
1923  * if one is needed.  Note that the caller must hold rnp->lock, which
1924  * is released before return.
1925  */
1926 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
1927 	__releases(rcu_get_root(rsp)->lock)
1928 {
1929 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
1930 	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
1931 	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
1932 }
1933 
1934 /*
1935  * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1936  * Allows quiescent states for a group of CPUs to be reported at one go
1937  * to the specified rcu_node structure, though all the CPUs in the group
1938  * must be represented by the same rcu_node structure (which need not be
1939  * a leaf rcu_node structure, though it often will be).  That structure's
1940  * lock must be held upon entry, and it is released before return.
1941  */
1942 static void
1943 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1944 		  struct rcu_node *rnp, unsigned long flags)
1945 	__releases(rnp->lock)
1946 {
1947 	struct rcu_node *rnp_c;
1948 
1949 	/* Walk up the rcu_node hierarchy. */
1950 	for (;;) {
1951 		if (!(rnp->qsmask & mask)) {
1952 
1953 			/* Our bit has already been cleared, so done. */
1954 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
1955 			return;
1956 		}
1957 		rnp->qsmask &= ~mask;
1958 		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
1959 						 mask, rnp->qsmask, rnp->level,
1960 						 rnp->grplo, rnp->grphi,
1961 						 !!rnp->gp_tasks);
1962 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1963 
1964 			/* Other bits still set at this level, so done. */
1965 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
1966 			return;
1967 		}
1968 		mask = rnp->grpmask;
1969 		if (rnp->parent == NULL) {
1970 
1971 			/* No more levels.  Exit loop holding root lock. */
1972 
1973 			break;
1974 		}
1975 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1976 		rnp_c = rnp;
1977 		rnp = rnp->parent;
1978 		raw_spin_lock_irqsave(&rnp->lock, flags);
1979 		smp_mb__after_unlock_lock();
1980 		WARN_ON_ONCE(rnp_c->qsmask);
1981 	}
1982 
1983 	/*
1984 	 * Get here if we are the last CPU to pass through a quiescent
1985 	 * state for this grace period.  Invoke rcu_report_qs_rsp()
1986 	 * to clean up and start the next grace period if one is needed.
1987 	 */
1988 	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
1989 }
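/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  With a two-level tree, the last quiescent-state report at
 * a leaf clears that leaf's ->grpmask bit in the root:
 *
 *	root:   qsmask 0b11 -> 0b10  (leaf 0's ->grpmask bit cleared)
 *	leaf 0: qsmask -> 0          (last CPU just reported)
 *	leaf 1: qsmask != 0          (still waiting on its CPUs)
 *
 * Only when the root's qsmask reaches zero, with no readers blocking
 * the grace period, does the loop above fall through to
 * rcu_report_qs_rsp().
 */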
1990 
1991 /*
1992  * Record a quiescent state for the specified CPU to that CPU's rcu_data
1993  * structure.  This must be either called from the specified CPU, or
1994  * called when the specified CPU is known to be offline (and when it is
1995  * also known that no other CPU is concurrently trying to help the offline
1996  * CPU).  The ->gpnum and ->completed checks make sure we are still in
1997  * the grace period of interest.  We don't want to end the current grace
1998  * period based on quiescent states detected in an earlier grace period!
1999  */
2000 static void
2001 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2002 {
2003 	unsigned long flags;
2004 	unsigned long mask;
2005 	bool needwake;
2006 	struct rcu_node *rnp;
2007 
2008 	rnp = rdp->mynode;
2009 	raw_spin_lock_irqsave(&rnp->lock, flags);
2010 	smp_mb__after_unlock_lock();
2011 	if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
2012 	    rnp->completed == rnp->gpnum) {
2013 
2014 		/*
2015 		 * The grace period in which this quiescent state was
2016 		 * recorded has ended, so don't report it upwards.
2017 		 * We will instead need a new quiescent state that lies
2018 		 * within the current grace period.
2019 		 */
2020 		rdp->passed_quiesce = 0;	/* need qs for new gp. */
2021 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2022 		return;
2023 	}
2024 	mask = rdp->grpmask;
2025 	if ((rnp->qsmask & mask) == 0) {
2026 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2027 	} else {
2028 		rdp->qs_pending = 0;
2029 
2030 		/*
2031 		 * This GP can't end until this CPU checks in, so all of our
2032 		 * callbacks can be processed during the next GP.
2033 		 */
2034 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2035 
2036 		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
2037 		if (needwake)
2038 			rcu_gp_kthread_wake(rsp);
2039 	}
2040 }
2041 
2042 /*
2043  * Check to see if there is a new grace period of which this CPU
2044  * is not yet aware, and if so, set up local rcu_data state for it.
2045  * Otherwise, see if this CPU has just passed through its first
2046  * quiescent state for this grace period, and record that fact if so.
2047  */
2048 static void
2049 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2050 {
2051 	/* Check for grace-period ends and beginnings. */
2052 	note_gp_changes(rsp, rdp);
2053 
2054 	/*
2055 	 * Does this CPU still need to do its part for the current grace period?
2056 	 * If no, return and let the other CPUs do their part as well.
2057 	 */
2058 	if (!rdp->qs_pending)
2059 		return;
2060 
2061 	/*
2062 	 * Was there a quiescent state since the beginning of the grace
2063 	 * period? If no, then exit and wait for the next call.
2064 	 */
2065 	if (!rdp->passed_quiesce)
2066 		return;
2067 
2068 	/*
2069 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2070 	 * judge of that).
2071 	 */
2072 	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2073 }
2074 
2075 #ifdef CONFIG_HOTPLUG_CPU
2076 
2077 /*
2078  * Send the specified CPU's RCU callbacks to the orphanage.  The
2079  * specified CPU must be offline, and the caller must hold the
2080  * ->orphan_lock.
2081  */
2082 static void
2083 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2084 			  struct rcu_node *rnp, struct rcu_data *rdp)
2085 {
2086 	/* No-CBs CPUs do not have orphanable callbacks. */
2087 	if (rcu_is_nocb_cpu(rdp->cpu))
2088 		return;
2089 
2090 	/*
2091 	 * Orphan the callbacks.  First adjust the counts.  This is safe
2092 	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
2093 	 * cannot be running now.  Thus no memory barrier is required.
2094 	 */
2095 	if (rdp->nxtlist != NULL) {
2096 		rsp->qlen_lazy += rdp->qlen_lazy;
2097 		rsp->qlen += rdp->qlen;
2098 		rdp->n_cbs_orphaned += rdp->qlen;
2099 		rdp->qlen_lazy = 0;
2100 		ACCESS_ONCE(rdp->qlen) = 0;
2101 	}
2102 
2103 	/*
2104 	 * Next, move those callbacks still needing a grace period to
2105 	 * the orphanage, where some other CPU will pick them up.
2106 	 * Some of the callbacks might have gone partway through a grace
2107 	 * period, but that is too bad.  They get to start over because we
2108 	 * cannot assume that grace periods are synchronized across CPUs.
2109 	 * We don't bother updating the ->nxttail[] array yet; instead
2110 	 * we just reset the whole thing later on.
2111 	 */
2112 	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
2113 		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
2114 		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
2115 		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2116 	}
2117 
2118 	/*
2119 	 * Then move the ready-to-invoke callbacks to the orphanage,
2120 	 * where some other CPU will pick them up.  These will not be
2121 	 * required to pass through another grace period: They are done.
2122 	 */
2123 	if (rdp->nxtlist != NULL) {
2124 		*rsp->orphan_donetail = rdp->nxtlist;
2125 		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
2126 	}
2127 
2128 	/* Finally, initialize the rcu_data structure's list to empty.  */
2129 	init_callback_list(rdp);
2130 }
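/*
 * Editor's note: an illustrative summary, not part of the original
 * source.  After rcu_send_cbs_to_orphanage() returns, the dying
 * CPU's callbacks have been split between two rcu_state-level lists:
 *
 *	rsp->orphan_donelist:	callbacks whose grace period has ended
 *	rsp->orphan_nxtlist:	callbacks still needing a grace period
 *
 * and the outgoing CPU's own rcu_data list is once again empty.
 */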
2131 
2132 /*
2133  * Adopt the RCU callbacks from the specified rcu_state structure's
2134  * orphanage.  The caller must hold the ->orphan_lock.
2135  */
2136 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
2137 {
2138 	int i;
2139 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2140 
2141 	/* No-CBs CPUs are handled specially. */
2142 	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
2143 		return;
2144 
2145 	/* Do the accounting first. */
2146 	rdp->qlen_lazy += rsp->qlen_lazy;
2147 	rdp->qlen += rsp->qlen;
2148 	rdp->n_cbs_adopted += rsp->qlen;
2149 	if (rsp->qlen_lazy != rsp->qlen)
2150 		rcu_idle_count_callbacks_posted();
2151 	rsp->qlen_lazy = 0;
2152 	rsp->qlen = 0;
2153 
2154 	/*
2155 	 * We do not need a memory barrier here because the only way we
2156 	 * can get here if there is an rcu_barrier() in flight is if
2157 	 * we are the task doing the rcu_barrier().
2158 	 */
2159 
2160 	/* First adopt the ready-to-invoke callbacks. */
2161 	if (rsp->orphan_donelist != NULL) {
2162 		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
2163 		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
2164 		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
2165 			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2166 				rdp->nxttail[i] = rsp->orphan_donetail;
2167 		rsp->orphan_donelist = NULL;
2168 		rsp->orphan_donetail = &rsp->orphan_donelist;
2169 	}
2170 
2171 	/* And then adopt the callbacks that still need a grace period. */
2172 	if (rsp->orphan_nxtlist != NULL) {
2173 		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
2174 		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
2175 		rsp->orphan_nxtlist = NULL;
2176 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2177 	}
2178 }
2179 
2180 /*
2181  * Trace the fact that this CPU is going offline.
2182  */
2183 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2184 {
2185 	RCU_TRACE(unsigned long mask);
2186 	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2187 	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2188 
2189 	RCU_TRACE(mask = rdp->grpmask);
2190 	trace_rcu_grace_period(rsp->name,
2191 			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2192 			       TPS("cpuofl"));
2193 }
2194 
2195 /*
2196  * The CPU has been completely removed, and some other CPU is reporting
2197  * this fact from process context.  Do the remainder of the cleanup,
2198  * including orphaning the outgoing CPU's RCU callbacks, and also
2199  * adopting them.  There can only be one CPU hotplug operation at a time,
2200  * so no other CPU can be attempting to update rcu_cpu_kthread_task.
2201  */
2202 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2203 {
2204 	unsigned long flags;
2205 	unsigned long mask;
2206 	int need_report = 0;
2207 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2208 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2209 
2210 	/* Adjust any no-longer-needed kthreads. */
2211 	rcu_boost_kthread_setaffinity(rnp, -1);
2212 
2213 	/* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
2214 
2215 	/* Exclude any attempts to start a new grace period. */
2216 	mutex_lock(&rsp->onoff_mutex);
2217 	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2218 
2219 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
2220 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2221 	rcu_adopt_orphan_cbs(rsp, flags);
2222 
2223 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
2224 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
2225 	do {
2226 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
2227 		smp_mb__after_unlock_lock();
2228 		rnp->qsmaskinit &= ~mask;
2229 		if (rnp->qsmaskinit != 0) {
2230 			if (rnp != rdp->mynode)
2231 				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2232 			break;
2233 		}
2234 		if (rnp == rdp->mynode)
2235 			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
2236 		else
2237 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2238 		mask = rnp->grpmask;
2239 		rnp = rnp->parent;
2240 	} while (rnp != NULL);
2241 
2242 	/*
2243 	 * We still hold the leaf rcu_node structure lock here, and
2244 	 * irqs are still disabled.  The reason for this subterfuge is
2245 	 * that invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
2246 	 * held leads to deadlock.
2247 	 */
2248 	raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
2249 	rnp = rdp->mynode;
2250 	if (need_report & RCU_OFL_TASKS_NORM_GP)
2251 		rcu_report_unblock_qs_rnp(rnp, flags);
2252 	else
2253 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2254 	if (need_report & RCU_OFL_TASKS_EXP_GP)
2255 		rcu_report_exp_rnp(rsp, rnp, true);
2256 	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2257 		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2258 		  cpu, rdp->qlen, rdp->nxtlist);
2259 	init_callback_list(rdp);
2260 	/* Disallow further callbacks on this CPU. */
2261 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2262 	mutex_unlock(&rsp->onoff_mutex);
2263 }
2264 
2265 #else /* #ifdef CONFIG_HOTPLUG_CPU */
2266 
2267 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2268 {
2269 }
2270 
2271 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2272 {
2273 }
2274 
2275 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
2276 
2277 /*
2278  * Invoke any RCU callbacks that have made it to the end of their grace
2279  * period.  Throttle as specified by rdp->blimit.
2280  */
2281 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2282 {
2283 	unsigned long flags;
2284 	struct rcu_head *next, *list, **tail;
2285 	long bl, count, count_lazy;
2286 	int i;
2287 
2288 	/* If no callbacks are ready, just return. */
2289 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
2290 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
2291 		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
2292 				    need_resched(), is_idle_task(current),
2293 				    rcu_is_callbacks_kthread());
2294 		return;
2295 	}
2296 
2297 	/*
2298 	 * Extract the list of ready callbacks, disabling interrupts to prevent
2299 	 * races with call_rcu() from interrupt handlers.
2300 	 */
2301 	local_irq_save(flags);
2302 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2303 	bl = rdp->blimit;
2304 	trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
2305 	list = rdp->nxtlist;
2306 	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2307 	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
2308 	tail = rdp->nxttail[RCU_DONE_TAIL];
2309 	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2310 		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2311 			rdp->nxttail[i] = &rdp->nxtlist;
2312 	local_irq_restore(flags);
2313 
2314 	/* Invoke callbacks. */
2315 	count = count_lazy = 0;
2316 	while (list) {
2317 		next = list->next;
2318 		prefetch(next);
2319 		debug_rcu_head_unqueue(list);
2320 		if (__rcu_reclaim(rsp->name, list))
2321 			count_lazy++;
2322 		list = next;
2323 		/* Stop only if limit reached and CPU has something to do. */
2324 		if (++count >= bl &&
2325 		    (need_resched() ||
2326 		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2327 			break;
2328 	}
2329 
2330 	local_irq_save(flags);
2331 	trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2332 			    is_idle_task(current),
2333 			    rcu_is_callbacks_kthread());
2334 
2335 	/* Update count, and requeue any remaining callbacks. */
2336 	if (list != NULL) {
2337 		*tail = rdp->nxtlist;
2338 		rdp->nxtlist = list;
2339 		for (i = 0; i < RCU_NEXT_SIZE; i++)
2340 			if (&rdp->nxtlist == rdp->nxttail[i])
2341 				rdp->nxttail[i] = tail;
2342 			else
2343 				break;
2344 	}
2345 	smp_mb(); /* List handling before counting for rcu_barrier(). */
2346 	rdp->qlen_lazy -= count_lazy;
2347 	ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
2348 	rdp->n_cbs_invoked += count;
2349 
2350 	/* Reinstate batch limit if we have worked down the excess. */
2351 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2352 		rdp->blimit = blimit;
2353 
2354 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2355 	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2356 		rdp->qlen_last_fqs_check = 0;
2357 		rdp->n_force_qs_snap = rsp->n_force_qs;
2358 	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2359 		rdp->qlen_last_fqs_check = rdp->qlen;
2360 	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
2361 
2362 	local_irq_restore(flags);
2363 
2364 	/* Re-invoke RCU core processing if there are callbacks remaining. */
2365 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2366 		invoke_rcu_core();
2367 }
2368 
2369 /*
2370  * Check to see if this CPU is in a non-context-switch quiescent state
2371  * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2372  * Also schedule RCU core processing.
2373  *
2374  * This function must be called from hardirq context.  It is normally
2375  * invoked from the scheduling-clock interrupt.  If rcu_pending returns
2376  * false, there is no point in invoking rcu_check_callbacks().
2377  */
2378 void rcu_check_callbacks(int cpu, int user)
2379 {
2380 	trace_rcu_utilization(TPS("Start scheduler-tick"));
2381 	increment_cpu_stall_ticks();
2382 	if (user || rcu_is_cpu_rrupt_from_idle()) {
2383 
2384 		/*
2385 		 * Get here if this CPU took its interrupt from user
2386 		 * mode or from the idle loop, and if this is not a
2387 		 * nested interrupt.  In this case, the CPU is in
2388 		 * a quiescent state, so note it.
2389 		 *
2390 		 * No memory barrier is required here because both
2391 		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2392 		 * variables that other CPUs neither access nor modify,
2393 		 * at least not while the corresponding CPU is online.
2394 		 */
2395 
2396 		rcu_sched_qs(cpu);
2397 		rcu_bh_qs(cpu);
2398 
2399 	} else if (!in_softirq()) {
2400 
2401 		/*
2402 		 * Get here if this CPU did not take its interrupt from
2403 		 * softirq, in other words, if it is not interrupting
2404 		 * an rcu_bh read-side critical section.  This is an _bh
2405 		 * critical section, so note it.
2406 		 */
2407 
2408 		rcu_bh_qs(cpu);
2409 	}
2410 	rcu_preempt_check_callbacks(cpu);
2411 	if (rcu_pending(cpu))
2412 		invoke_rcu_core();
2413 	trace_rcu_utilization(TPS("End scheduler-tick"));
2414 }
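/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  In kernels of this vintage, rcu_check_callbacks() is
 * invoked from the scheduling-clock path in kernel/timer.c, roughly:
 *
 *	void update_process_times(int user_tick)
 *	{
 *		int cpu = smp_processor_id();
 *
 *		...
 *		rcu_check_callbacks(cpu, user_tick);
 *		...
 *	}
 */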
2415 
2416 /*
2417  * Scan the leaf rcu_node structures, processing dyntick state for any that
2418  * have not yet encountered a quiescent state, using the function specified.
2419  * Also initiate boosting for any threads blocked on the root rcu_node.
2420  *
2421  * The caller must have suppressed start of new grace periods.
2422  */
2423 static void force_qs_rnp(struct rcu_state *rsp,
2424 			 int (*f)(struct rcu_data *rsp, bool *isidle,
2425 				  unsigned long *maxj),
2426 			 bool *isidle, unsigned long *maxj)
2427 {
2428 	unsigned long bit;
2429 	int cpu;
2430 	unsigned long flags;
2431 	unsigned long mask;
2432 	struct rcu_node *rnp;
2433 
2434 	rcu_for_each_leaf_node(rsp, rnp) {
2435 		cond_resched();
2436 		mask = 0;
2437 		raw_spin_lock_irqsave(&rnp->lock, flags);
2438 		smp_mb__after_unlock_lock();
2439 		if (!rcu_gp_in_progress(rsp)) {
2440 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
2441 			return;
2442 		}
2443 		if (rnp->qsmask == 0) {
2444 			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
2445 			continue;
2446 		}
2447 		cpu = rnp->grplo;
2448 		bit = 1;
2449 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2450 			if ((rnp->qsmask & bit) != 0) {
2451 				if ((rnp->qsmaskinit & bit) != 0)
2452 					*isidle = 0;
2453 				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2454 					mask |= bit;
2455 			}
2456 		}
2457 		if (mask != 0) {
2458 
2459 			/* rcu_report_qs_rnp() releases rnp->lock. */
2460 			rcu_report_qs_rnp(mask, rsp, rnp, flags);
2461 			continue;
2462 		}
2463 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
2464 	}
2465 	rnp = rcu_get_root(rsp);
2466 	if (rnp->qsmask == 0) {
2467 		raw_spin_lock_irqsave(&rnp->lock, flags);
2468 		smp_mb__after_unlock_lock();
2469 		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2470 	}
2471 }
2472 
2473 /*
2474  * Force quiescent states on reluctant CPUs, and also detect which
2475  * CPUs are in dyntick-idle mode.
2476  */
2477 static void force_quiescent_state(struct rcu_state *rsp)
2478 {
2479 	unsigned long flags;
2480 	bool ret;
2481 	struct rcu_node *rnp;
2482 	struct rcu_node *rnp_old = NULL;
2483 
2484 	/* Funnel through hierarchy to reduce memory contention. */
2485 	rnp = __this_cpu_read(rsp->rda->mynode);
2486 	for (; rnp != NULL; rnp = rnp->parent) {
2487 		ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2488 		      !raw_spin_trylock(&rnp->fqslock);
2489 		if (rnp_old != NULL)
2490 			raw_spin_unlock(&rnp_old->fqslock);
2491 		if (ret) {
2492 			rsp->n_force_qs_lh++;
2493 			return;
2494 		}
2495 		rnp_old = rnp;
2496 	}
2497 	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2498 
2499 	/* Reached the root of the rcu_node tree, acquire lock. */
2500 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
2501 	smp_mb__after_unlock_lock();
2502 	raw_spin_unlock(&rnp_old->fqslock);
2503 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2504 		rsp->n_force_qs_lh++;
2505 		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2506 		return;  /* Someone beat us to it. */
2507 	}
2508 	ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
2509 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2510 	wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
2511 }
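/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  The funnel above has each task trylocking ->fqslock from
 * its leaf rcu_node toward the root, releasing the previous level as
 * it goes; a failed trylock means another task is already funneling
 * upward, so this one simply drops out:
 *
 *	leaf ->fqslock -> ... -> root ->fqslock -> root ->lock
 *	    -> set RCU_GP_FLAG_FQS and wake the grace-period kthread
 */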
2512 
2513 /*
2514  * This does the RCU core processing work for the specified rcu_state
2515  * and rcu_data structures.  This may be called only from the CPU to
2516  * whom the rdp belongs.
2517  * which the rdp belongs.
2518 static void
2519 __rcu_process_callbacks(struct rcu_state *rsp)
2520 {
2521 	unsigned long flags;
2522 	bool needwake;
2523 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2524 
2525 	WARN_ON_ONCE(rdp->beenonline == 0);
2526 
2527 	/* Update RCU state based on any recent quiescent states. */
2528 	rcu_check_quiescent_state(rsp, rdp);
2529 
2530 	/* Does this CPU require a not-yet-started grace period? */
2531 	local_irq_save(flags);
2532 	if (cpu_needs_another_gp(rsp, rdp)) {
2533 		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2534 		needwake = rcu_start_gp(rsp);
2535 		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2536 		if (needwake)
2537 			rcu_gp_kthread_wake(rsp);
2538 	} else {
2539 		local_irq_restore(flags);
2540 	}
2541 
2542 	/* If there are callbacks ready, invoke them. */
2543 	if (cpu_has_callbacks_ready_to_invoke(rdp))
2544 		invoke_rcu_callbacks(rsp, rdp);
2545 
2546 	/* Do any needed deferred wakeups of rcuo kthreads. */
2547 	do_nocb_deferred_wakeup(rdp);
2548 }
2549 
2550 /*
2551  * Do RCU core processing for the current CPU.
2552  */
2553 static void rcu_process_callbacks(struct softirq_action *unused)
2554 {
2555 	struct rcu_state *rsp;
2556 
2557 	if (cpu_is_offline(smp_processor_id()))
2558 		return;
2559 	trace_rcu_utilization(TPS("Start RCU core"));
2560 	for_each_rcu_flavor(rsp)
2561 		__rcu_process_callbacks(rsp);
2562 	trace_rcu_utilization(TPS("End RCU core"));
2563 }
2564 
2565 /*
2566  * Schedule RCU callback invocation.  If the specified type of RCU
2567  * does not support RCU priority boosting, just do a direct call,
2568  * otherwise wake up the per-CPU kernel kthread.  Note that because we
2569  * are running on the current CPU with interrupts disabled, the
2570  * rcu_cpu_kthread_task cannot disappear out from under us.
2571  */
2572 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2573 {
2574 	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
2575 		return;
2576 	if (likely(!rsp->boost)) {
2577 		rcu_do_batch(rsp, rdp);
2578 		return;
2579 	}
2580 	invoke_rcu_callbacks_kthread();
2581 }
2582 
2583 static void invoke_rcu_core(void)
2584 {
2585 	if (cpu_online(smp_processor_id()))
2586 		raise_softirq(RCU_SOFTIRQ);
2587 }
2588 
2589 /*
2590  * Handle any core-RCU processing required by a call_rcu() invocation.
2591  */
2592 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2593 			    struct rcu_head *head, unsigned long flags)
2594 {
2595 	bool needwake;
2596 
2597 	/*
2598 	 * If called from an extended quiescent state, invoke the RCU
2599 	 * core in order to force a re-evaluation of RCU's idleness.
2600 	 */
2601 	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
2602 		invoke_rcu_core();
2603 
2604 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2605 	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2606 		return;
2607 
2608 	/*
2609 	 * Force the grace period if too many callbacks or too long waiting.
2610 	 * Enforce hysteresis, and don't invoke force_quiescent_state()
2611 	 * if some other CPU has recently done so.  Also, don't bother
2612 	 * invoking force_quiescent_state() if the newly enqueued callback
2613 	 * is the only one waiting for a grace period to complete.
2614 	 */
2615 	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
2616 
2617 		/* Are we ignoring a completed grace period? */
2618 		note_gp_changes(rsp, rdp);
2619 
2620 		/* Start a new grace period if one not already started. */
2621 		if (!rcu_gp_in_progress(rsp)) {
2622 			struct rcu_node *rnp_root = rcu_get_root(rsp);
2623 
2624 			raw_spin_lock(&rnp_root->lock);
2625 			smp_mb__after_unlock_lock();
2626 			needwake = rcu_start_gp(rsp);
2627 			raw_spin_unlock(&rnp_root->lock);
2628 			if (needwake)
2629 				rcu_gp_kthread_wake(rsp);
2630 		} else {
2631 			/* Give the grace period a kick. */
2632 			rdp->blimit = LONG_MAX;
2633 			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2634 			    *rdp->nxttail[RCU_DONE_TAIL] != head)
2635 				force_quiescent_state(rsp);
2636 			rdp->n_force_qs_snap = rsp->n_force_qs;
2637 			rdp->qlen_last_fqs_check = rdp->qlen;
2638 		}
2639 	}
2640 }
2641 
2642 /*
2643  * RCU callback function to leak a callback.
2644  */
2645 static void rcu_leak_callback(struct rcu_head *rhp)
2646 {
2647 }
2648 
2649 /*
2650  * Helper function for call_rcu() and friends.  The cpu argument will
2651  * normally be -1, indicating "currently running CPU".  It may specify
2652  * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
2653  * is expected to specify a CPU.
2654  */
2655 static void
2656 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
2657 	   struct rcu_state *rsp, int cpu, bool lazy)
2658 {
2659 	unsigned long flags;
2660 	struct rcu_data *rdp;
2661 
2662 	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
2663 	if (debug_rcu_head_queue(head)) {
2664 		/* Probable double call_rcu(), so leak the callback. */
2665 		ACCESS_ONCE(head->func) = rcu_leak_callback;
2666 		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
2667 		return;
2668 	}
2669 	head->func = func;
2670 	head->next = NULL;
2671 
2672 	/*
2673 	 * Opportunistically note grace-period endings and beginnings.
2674 	 * Note that we might see a beginning right after we see an
2675 	 * end, but never vice versa, since this CPU has to pass through
2676 	 * a quiescent state betweentimes.
2677 	 */
2678 	local_irq_save(flags);
2679 	rdp = this_cpu_ptr(rsp->rda);
2680 
2681 	/* Add the callback to our list. */
2682 	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
2683 		int offline;
2684 
2685 		if (cpu != -1)
2686 			rdp = per_cpu_ptr(rsp->rda, cpu);
2687 		offline = !__call_rcu_nocb(rdp, head, lazy, flags);
2688 		WARN_ON_ONCE(offline);
2689 		/* __call_rcu() is illegal on an offline CPU; leak the callback. */
2690 		local_irq_restore(flags);
2691 		return;
2692 	}
2693 	ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
2694 	if (lazy)
2695 		rdp->qlen_lazy++;
2696 	else
2697 		rcu_idle_count_callbacks_posted();
2698 	smp_mb();  /* Count before adding callback for rcu_barrier(). */
2699 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
2700 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2701 
2702 	if (__is_kfree_rcu_offset((unsigned long)func))
2703 		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2704 					 rdp->qlen_lazy, rdp->qlen);
2705 	else
2706 		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
2707 
2708 	/* Go handle any RCU core processing required. */
2709 	__call_rcu_core(rsp, rdp, head, flags);
2710 	local_irq_restore(flags);
2711 }
2712 
2713 /*
2714  * Queue an RCU-sched callback for invocation after a grace period.
2715  */
2716 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2717 {
2718 	__call_rcu(head, func, &rcu_sched_state, -1, 0);
2719 }
2720 EXPORT_SYMBOL_GPL(call_rcu_sched);
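/*
 * Editor's note: an illustrative usage sketch, not part of the
 * original source; struct foo, foo_reclaim(), and fp are made-up
 * names.  A caller embeds an rcu_head in its own structure and
 * recovers the enclosing structure in the callback:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * Then, after unlinking fp from all RCU-protected structures:
 *
 *	call_rcu_sched(&fp->rcu, foo_reclaim);
 */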
2721 
2722 /*
2723  * Queue an RCU callback for invocation after a quicker grace period.
2724  */
2725 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2726 {
2727 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
2728 }
2729 EXPORT_SYMBOL_GPL(call_rcu_bh);
2730 
2731 /*
2732  * Queue an RCU callback for lazy invocation after a grace period.
2733  * This will likely be later named something like "call_rcu_lazy()",
2734  * but this change will require some way of tagging the lazy RCU
2735  * callbacks in the list of pending callbacks. Until then, this
2736  * function may only be called from __kfree_rcu().
2737  */
2738 void kfree_call_rcu(struct rcu_head *head,
2739 		    void (*func)(struct rcu_head *rcu))
2740 {
2741 	__call_rcu(head, func, rcu_state_p, -1, 1);
2742 }
2743 EXPORT_SYMBOL_GPL(kfree_call_rcu);
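/*
 * Editor's note: an illustrative usage sketch, not part of the
 * original source.  Callers do not invoke kfree_call_rcu() directly;
 * the kfree_rcu() wrapper encodes the rcu_head offset as the "func"
 * argument via __kfree_rcu().  Given a made-up struct foo with an
 * embedded rcu_head named rcu:
 *
 *	kfree_rcu(fp, rcu);
 *
 * frees fp once a grace period has elapsed, with no explicit
 * callback function needed.
 */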
2744 
2745 /*
2746  * Because a context switch is a grace period for RCU-sched and RCU-bh,
2747  * any blocking grace-period wait automatically implies a grace period
2748  * if there is only one CPU online at any point in time during execution
2749  * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
2750  * occasionally incorrectly indicate that there are multiple CPUs online
2751  * when there was in fact only one the whole time, as this just adds
2752  * some overhead: RCU still operates correctly.
2753  */
2754 static inline int rcu_blocking_is_gp(void)
2755 {
2756 	int ret;
2757 
2758 	might_sleep();  /* Check for RCU read-side critical section. */
2759 	preempt_disable();
2760 	ret = num_online_cpus() <= 1;
2761 	preempt_enable();
2762 	return ret;
2763 }
2764 
2765 /**
2766  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
2767  *
2768  * Control will return to the caller some time after a full rcu-sched
2769  * grace period has elapsed, in other words after all currently executing
2770  * rcu-sched read-side critical sections have completed.   These read-side
2771  * critical sections are delimited by rcu_read_lock_sched() and
2772  * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
2773  * local_irq_disable(), and so on may be used in place of
2774  * rcu_read_lock_sched().
2775  *
2776  * This means that all preempt_disable code sequences, including NMI and
2777  * non-threaded hardware-interrupt handlers, in progress on entry will
2778  * have completed before this primitive returns.  However, this does not
2779  * guarantee that softirq handlers will have completed, since in some
2780  * kernels, these handlers can run in process context, and can block.
2781  *
2782  * Note that this guarantee implies further memory-ordering guarantees.
2783  * On systems with more than one CPU, when synchronize_sched() returns,
2784  * each CPU is guaranteed to have executed a full memory barrier since the
2785  * end of its last RCU-sched read-side critical section whose beginning
2786  * preceded the call to synchronize_sched().  In addition, each CPU having
2787  * an RCU read-side critical section that extends beyond the return from
2788  * synchronize_sched() is guaranteed to have executed a full memory barrier
2789  * after the beginning of synchronize_sched() and before the beginning of
2790  * that RCU read-side critical section.  Note that these guarantees include
2791  * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2792  * that are executing in the kernel.
2793  *
2794  * Furthermore, if CPU A invoked synchronize_sched(), which returned
2795  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2796  * to have executed a full memory barrier during the execution of
2797  * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
2798  * again only if the system has more than one CPU).
2799  *
2800  * This primitive provides the guarantees made by the (now removed)
2801  * synchronize_kernel() API.  In contrast, synchronize_rcu() only
2802  * guarantees that rcu_read_lock() sections will have completed.
2803  * In "classic RCU", these two guarantees happen to be one and
2804  * the same, but can differ in realtime RCU implementations.
2805  */
2806 void synchronize_sched(void)
2807 {
2808 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2809 			   !lock_is_held(&rcu_lock_map) &&
2810 			   !lock_is_held(&rcu_sched_lock_map),
2811 			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
2812 	if (rcu_blocking_is_gp())
2813 		return;
2814 	if (rcu_expedited)
2815 		synchronize_sched_expedited();
2816 	else
2817 		wait_rcu_gp(call_rcu_sched);
2818 }
2819 EXPORT_SYMBOL_GPL(synchronize_sched);
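/*
 * Editor's note: an illustrative update-side sketch, not part of the
 * original source; foo_lock, fp, and the list are made-up names.
 * Unlink an element, wait for all pre-existing preempt-disabled
 * readers, then free it:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&fp->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_sched();
 *	kfree(fp);
 *
 * Readers would traverse the list under preempt_disable() or
 * rcu_read_lock_sched().
 */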
2820 
2821 /**
2822  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
2823  *
2824  * Control will return to the caller some time after a full rcu_bh grace
2825  * period has elapsed, in other words after all currently executing rcu_bh
2826  * read-side critical sections have completed.  RCU read-side critical
2827  * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
2828  * and may be nested.
2829  *
2830  * See the description of synchronize_sched() for more detailed information
2831  * on memory ordering guarantees.
2832  */
2833 void synchronize_rcu_bh(void)
2834 {
2835 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2836 			   !lock_is_held(&rcu_lock_map) &&
2837 			   !lock_is_held(&rcu_sched_lock_map),
2838 			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
2839 	if (rcu_blocking_is_gp())
2840 		return;
2841 	if (rcu_expedited)
2842 		synchronize_rcu_bh_expedited();
2843 	else
2844 		wait_rcu_gp(call_rcu_bh);
2845 }
2846 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
2847 
2848 /**
2849  * get_state_synchronize_rcu - Snapshot current RCU state
2850  *
2851  * Returns a cookie that is used by a later call to cond_synchronize_rcu()
2852  * to determine whether or not a full grace period has elapsed in the
2853  * meantime.
2854  */
2855 unsigned long get_state_synchronize_rcu(void)
2856 {
2857 	/*
2858 	 * Any prior manipulation of RCU-protected data must happen
2859 	 * before the load from ->gpnum.
2860 	 */
2861 	smp_mb();  /* ^^^ */
2862 
2863 	/*
2864 	 * Make sure this load happens before the purportedly
2865 	 * time-consuming work between get_state_synchronize_rcu()
2866 	 * and cond_synchronize_rcu().
2867 	 */
2868 	return smp_load_acquire(&rcu_state_p->gpnum);
2869 }
2870 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
2871 
2872 /**
2873  * cond_synchronize_rcu - Conditionally wait for an RCU grace period
2874  *
2875  * @oldstate: return value from earlier call to get_state_synchronize_rcu()
2876  *
2877  * If a full RCU grace period has elapsed since the earlier call to
2878  * get_state_synchronize_rcu(), just return.  Otherwise, invoke
2879  * synchronize_rcu() to wait for a full grace period.
2880  *
2881  * Yes, this function does not take counter wrap into account.  But
2882  * counter wrap is harmless.  If the counter wraps, we have waited for
2883  * more than 2 billion grace periods (and way more on a 64-bit system!),
2884  * so waiting for one additional grace period should be just fine.
2885  */
2886 void cond_synchronize_rcu(unsigned long oldstate)
2887 {
2888 	unsigned long newstate;
2889 
2890 	/*
2891 	 * Ensure that this load happens before any RCU-destructive
2892 	 * actions the caller might carry out after we return.
2893 	 */
2894 	newstate = smp_load_acquire(&rcu_state_p->completed);
2895 	if (ULONG_CMP_GE(oldstate, newstate))
2896 		synchronize_rcu();
2897 }
2898 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
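/*
 * Editor's note: an illustrative pairing of the two functions above,
 * not part of the original source; do_something_lengthy() is a
 * made-up stand-in:
 *
 *	unsigned long oldstate = get_state_synchronize_rcu();
 *	do_something_lengthy();
 *	cond_synchronize_rcu(oldstate);
 *
 * If a full grace period elapsed during do_something_lengthy(),
 * cond_synchronize_rcu() returns immediately; otherwise it blocks in
 * synchronize_rcu().
 */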
2899 
2900 static int synchronize_sched_expedited_cpu_stop(void *data)
2901 {
2902 	/*
2903 	 * There must be a full memory barrier on each affected CPU
2904 	 * between the time that try_stop_cpus() is called and the
2905 	 * time that it returns.
2906 	 *
2907 	 * In the current initial implementation of cpu_stop, the
2908 	 * above condition is already met when the control reaches
2909 	 * this point and the following smp_mb() is not strictly
2910 	 * necessary.  Do smp_mb() anyway for documentation and
2911 	 * robustness against future implementation changes.
2912 	 */
2913 	smp_mb(); /* See above comment block. */
2914 	return 0;
2915 }
2916 
2917 /**
2918  * synchronize_sched_expedited - Brute-force RCU-sched grace period
2919  *
2920  * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
2921  * approach to force the grace period to end quickly.  This consumes
2922  * significant time on all CPUs and is unfriendly to real-time workloads,
2923  * so is thus not recommended for any sort of common-case code.  In fact,
2924  * if you are using synchronize_sched_expedited() in a loop, please
2925  * restructure your code to batch your updates, and then use a single
2926  * synchronize_sched() instead.
2927  *
2928  * Note that it is illegal to call this function while holding any lock
2929  * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
2930  * to call this function from a CPU-hotplug notifier.  Failing to observe
2931  * these restrictions will result in deadlock.
2932  *
2933  * This implementation can be thought of as an application of ticket
2934  * locking to RCU, with sync_sched_expedited_started and
2935  * sync_sched_expedited_done taking on the roles of the halves
2936  * of the ticket-lock word.  Each task atomically increments
2937  * sync_sched_expedited_started upon entry, snapshotting the old value,
2938  * then attempts to stop all the CPUs.  If this succeeds, then each
2939  * CPU will have executed a context switch, resulting in an RCU-sched
2940  * grace period.  We are then done, so we use atomic_cmpxchg() to
2941  * update sync_sched_expedited_done to match our snapshot -- but
2942  * only if someone else has not already advanced past our snapshot.
2943  *
2944  * On the other hand, if try_stop_cpus() fails, we check the value
2945  * of sync_sched_expedited_done.  If it has advanced past our
2946  * initial snapshot, then someone else must have forced a grace period
2947  * some time after we took our snapshot.  In this case, our work is
2948  * done for us, and we can simply return.  Otherwise, we try again,
2949  * but keep our initial snapshot for purposes of checking for someone
2950  * doing our work for us.
2951  *
2952  * If we fail too many times in a row, we fall back to synchronize_sched().
2953  */
2954 void synchronize_sched_expedited(void)
2955 {
2956 	long firstsnap, s, snap;
2957 	int trycount = 0;
2958 	struct rcu_state *rsp = &rcu_sched_state;
2959 
2960 	/*
2961 	 * If we are in danger of counter wrap, just do synchronize_sched().
2962 	 * By allowing sync_sched_expedited_started to advance no more than
2963 	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
2964 	 * that more than 3.5 billion CPUs would be required to force a
2965 	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
2966 	 * course be required on a 64-bit system.
2967 	 */
2968 	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
2969 			 (ulong)atomic_long_read(&rsp->expedited_done) +
2970 			 ULONG_MAX / 8)) {
2971 		synchronize_sched();
2972 		atomic_long_inc(&rsp->expedited_wrap);
2973 		return;
2974 	}
2975 
2976 	/*
2977 	 * Take a ticket.  Note that atomic_inc_return() implies a
2978 	 * full memory barrier.
2979 	 */
2980 	snap = atomic_long_inc_return(&rsp->expedited_start);
2981 	firstsnap = snap;
2982 	get_online_cpus();
2983 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
2984 
2985 	/*
2986 	 * Each pass through the following loop attempts to force a
2987 	 * context switch on each CPU.
2988 	 */
2989 	while (try_stop_cpus(cpu_online_mask,
2990 			     synchronize_sched_expedited_cpu_stop,
2991 			     NULL) == -EAGAIN) {
2992 		put_online_cpus();
2993 		atomic_long_inc(&rsp->expedited_tryfail);
2994 
2995 		/* Check to see if someone else did our work for us. */
2996 		s = atomic_long_read(&rsp->expedited_done);
2997 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2998 			/* ensure test happens before caller kfree */
2999 			smp_mb__before_atomic(); /* ^^^ */
3000 			atomic_long_inc(&rsp->expedited_workdone1);
3001 			return;
3002 		}
3003 
3004 		/* No joy, try again later.  Or just synchronize_sched(). */
3005 		if (trycount++ < 10) {
3006 			udelay(trycount * num_online_cpus());
3007 		} else {
3008 			wait_rcu_gp(call_rcu_sched);
3009 			atomic_long_inc(&rsp->expedited_normal);
3010 			return;
3011 		}
3012 
3013 		/* Recheck to see if someone else did our work for us. */
3014 		s = atomic_long_read(&rsp->expedited_done);
3015 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
3016 			/* ensure test happens before caller kfree */
3017 			smp_mb__before_atomic(); /* ^^^ */
3018 			atomic_long_inc(&rsp->expedited_workdone2);
3019 			return;
3020 		}
3021 
3022 		/*
3023 		 * Refetching sync_sched_expedited_started allows later
3024 		 * callers to piggyback on our grace period.  We retry
3025 		 * after they started, so our grace period works for them,
3026 		 * and they started after our first try, so their grace
3027 		 * period works for us.
3028 		 */
3029 		get_online_cpus();
3030 		snap = atomic_long_read(&rsp->expedited_start);
3031 		smp_mb(); /* ensure read is before try_stop_cpus(). */
3032 	}
3033 	atomic_long_inc(&rsp->expedited_stoppedcpus);
3034 
3035 	/*
3036 	 * Everyone up to our most recent fetch is covered by our grace
3037 	 * period.  Update the counter, but only if our work is still
3038 	 * relevant -- which it won't be if someone who started later
3039 	 * than we did already did their update.
3040 	 */
3041 	do {
3042 		atomic_long_inc(&rsp->expedited_done_tries);
3043 		s = atomic_long_read(&rsp->expedited_done);
3044 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
3045 			/* ensure test happens before caller kfree */
3046 			smp_mb__before_atomic(); /* ^^^ */
3047 			atomic_long_inc(&rsp->expedited_done_lost);
3048 			break;
3049 		}
3050 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
3051 	atomic_long_inc(&rsp->expedited_done_exit);
3052 
3053 	put_online_cpus();
3054 }
3055 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
3056 
3057 /*
3058  * Check to see if there is any immediate RCU-related work to be done
3059  * by the current CPU, for the specified type of RCU, returning 1 if so.
3060  * The checks are in order of increasing expense: checks that can be
3061  * carried out against CPU-local state are performed first.  However,
3062  * we must check for CPU stalls first, else we might not get a chance.
3063  */
3064 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3065 {
3066 	struct rcu_node *rnp = rdp->mynode;
3067 
3068 	rdp->n_rcu_pending++;
3069 
3070 	/* Check for CPU stalls, if enabled. */
3071 	check_cpu_stall(rsp, rdp);
3072 
3073 	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
3074 	if (rcu_nohz_full_cpu(rsp))
3075 		return 0;
3076 
3077 	/* Is the RCU core waiting for a quiescent state from this CPU? */
3078 	if (rcu_scheduler_fully_active &&
3079 	    rdp->qs_pending && !rdp->passed_quiesce) {
3080 		rdp->n_rp_qs_pending++;
3081 	} else if (rdp->qs_pending && rdp->passed_quiesce) {
3082 		rdp->n_rp_report_qs++;
3083 		return 1;
3084 	}
3085 
3086 	/* Does this CPU have callbacks ready to invoke? */
3087 	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
3088 		rdp->n_rp_cb_ready++;
3089 		return 1;
3090 	}
3091 
3092 	/* Has RCU gone idle with this CPU needing another grace period? */
3093 	if (cpu_needs_another_gp(rsp, rdp)) {
3094 		rdp->n_rp_cpu_needs_gp++;
3095 		return 1;
3096 	}
3097 
3098 	/* Has another RCU grace period completed?  */
3099 	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
3100 		rdp->n_rp_gp_completed++;
3101 		return 1;
3102 	}
3103 
3104 	/* Has a new RCU grace period started? */
3105 	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
3106 		rdp->n_rp_gp_started++;
3107 		return 1;
3108 	}
3109 
3110 	/* Does this CPU need a deferred NOCB wakeup? */
3111 	if (rcu_nocb_need_deferred_wakeup(rdp)) {
3112 		rdp->n_rp_nocb_defer_wakeup++;
3113 		return 1;
3114 	}
3115 
3116 	/* nothing to do */
3117 	rdp->n_rp_need_nothing++;
3118 	return 0;
3119 }
3120 
3121 /*
3122  * Check to see if there is any immediate RCU-related work to be done
3123  * by the current CPU, returning 1 if so.  This function is part of the
3124  * RCU implementation; it is -not- an exported member of the RCU API.
3125  */
3126 static int rcu_pending(int cpu)
3127 {
3128 	struct rcu_state *rsp;
3129 
3130 	for_each_rcu_flavor(rsp)
3131 		if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
3132 			return 1;
3133 	return 0;
3134 }
3135 
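/*
 * Simplified sketch of the call path: the scheduling-clock interrupt
 * path (rcu_check_callbacks(), earlier in this file) uses rcu_pending()
 * to decide whether to raise RCU_SOFTIRQ on this CPU:
 *
 *	if (rcu_pending(cpu))
 *		invoke_rcu_core();
 */
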
3136 /*
3137  * Return true if the specified CPU has any callback.  If all_lazy is
3138  * non-NULL, store an indication of whether all callbacks are lazy.
3139  * (If there are no callbacks, all of them are deemed to be lazy.)
3140  */
3141 static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
3142 {
3143 	bool al = true;
3144 	bool hc = false;
3145 	struct rcu_data *rdp;
3146 	struct rcu_state *rsp;
3147 
3148 	for_each_rcu_flavor(rsp) {
3149 		rdp = per_cpu_ptr(rsp->rda, cpu);
3150 		if (!rdp->nxtlist)
3151 			continue;
3152 		hc = true;
3153 		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
3154 			al = false;
3155 			break;
3156 		}
3157 	}
3158 	if (all_lazy)
3159 		*all_lazy = al;
3160 	return hc;
3161 }
3162 
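/*
 * Interpretation sketch (hedged): idle-entry code can combine the
 * return value and *all_lazy to choose a wakeup policy.  The three
 * sleep helpers below are hypothetical names for illustration only:
 *
 *	bool all_lazy;
 *
 *	if (!rcu_cpu_has_callbacks(cpu, &all_lazy))
 *		sleep_indefinitely();
 *	else if (all_lazy)
 *		sleep_with_long_timeout();
 *	else
 *		sleep_with_short_timeout();
 */
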
3163 /*
3164  * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
3165  * the compiler is expected to optimize this away.
3166  */
3167 static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3168 			       int cpu, unsigned long done)
3169 {
3170 	trace_rcu_barrier(rsp->name, s, cpu,
3171 			  atomic_read(&rsp->barrier_cpu_count), done);
3172 }
3173 
3174 /*
3175  * RCU callback function for _rcu_barrier().  If we are last, wake
3176  * up the task executing _rcu_barrier().
3177  */
3178 static void rcu_barrier_callback(struct rcu_head *rhp)
3179 {
3180 	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3181 	struct rcu_state *rsp = rdp->rsp;
3182 
3183 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3184 		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
3185 		complete(&rsp->barrier_completion);
3186 	} else {
3187 		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
3188 	}
3189 }
3190 
3191 /*
3192  * Called with preemption disabled, and from cross-cpu IRQ context.
3193  */
3194 static void rcu_barrier_func(void *type)
3195 {
3196 	struct rcu_state *rsp = type;
3197 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3198 
3199 	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
3200 	atomic_inc(&rsp->barrier_cpu_count);
3201 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
3202 }
3203 
3204 /*
3205  * Orchestrate the specified type of RCU barrier, waiting for all
3206  * RCU callbacks of the specified type to complete.
3207  */
3208 static void _rcu_barrier(struct rcu_state *rsp)
3209 {
3210 	int cpu;
3211 	struct rcu_data *rdp;
3212 	unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
3213 	unsigned long snap_done;
3214 
3215 	_rcu_barrier_trace(rsp, "Begin", -1, snap);
3216 
3217 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3218 	mutex_lock(&rsp->barrier_mutex);
3219 
3220 	/*
3221 	 * Ensure that all prior references, including to ->n_barrier_done,
3222 	 * are ordered before the _rcu_barrier() machinery.
3223 	 */
3224 	smp_mb();  /* See above block comment. */
3225 
3226 	/*
3227 	 * Recheck ->n_barrier_done to see if others did our work for us.
3228 	 * This means checking ->n_barrier_done for an even-to-odd-to-even
3229 	 * transition.  The "if" expression below therefore rounds the old
3230 	 * value up to the next even number and adds two before comparing.
3231 	 */
3232 	snap_done = rsp->n_barrier_done;
3233 	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
3234 
3235 	/*
3236 	 * If the value in snap is odd, we needed to wait for the current
3237 	 * rcu_barrier() to complete, then wait for the next one, in other
3238 	 * words, we need the value of snap_done to be three larger than
3239 	 * the value of snap.  On the other hand, if the value in snap is
3240 	 * even, we only had to wait for the next rcu_barrier() to complete,
3241 	 * in other words, we need the value of snap_done to be only two
3242 	 * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
3243 	 * this for us (thank you, Linus!).
3244 	 */
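	/*
	 * Worked example with illustrative values: if snap == 4 (even),
	 * then (4 + 3) & ~0x1 == 6, so any snap_done >= snap + 2 takes
	 * the early exit.  If snap == 5 (odd), then (5 + 3) & ~0x1 == 8,
	 * requiring snap_done >= snap + 3 to cover both the in-flight
	 * rcu_barrier() and the one after it.
	 */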
3245 	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
3246 		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
3247 		smp_mb(); /* caller's subsequent code after above check. */
3248 		mutex_unlock(&rsp->barrier_mutex);
3249 		return;
3250 	}
3251 
3252 	/*
3253 	 * Increment ->n_barrier_done to avoid duplicate work.  Use
3254 	 * ACCESS_ONCE() to prevent the compiler from speculating
3255 	 * the increment to precede the early-exit check.
3256 	 */
3257 	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
3258 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
3259 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
3260 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
3261 
3262 	/*
3263 	 * Initialize the count to one rather than to zero in order to
3264 	 * avoid a too-soon return to zero in case of a short grace period
3265 	 * (or preemption of this task).  Exclude CPU-hotplug operations
3266 	 * to ensure that no offline CPU has callbacks queued.
3267 	 */
3268 	init_completion(&rsp->barrier_completion);
3269 	atomic_set(&rsp->barrier_cpu_count, 1);
3270 	get_online_cpus();
3271 
3272 	/*
3273 	 * Force each CPU with callbacks to register a new callback.
3274 	 * When that callback is invoked, we will know that all of the
3275 	 * corresponding CPU's preceding callbacks have been invoked.
3276 	 */
3277 	for_each_possible_cpu(cpu) {
3278 		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3279 			continue;
3280 		rdp = per_cpu_ptr(rsp->rda, cpu);
3281 		if (rcu_is_nocb_cpu(cpu)) {
3282 			_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3283 					   rsp->n_barrier_done);
3284 			atomic_inc(&rsp->barrier_cpu_count);
3285 			__call_rcu(&rdp->barrier_head, rcu_barrier_callback,
3286 				   rsp, cpu, 0);
3287 		} else if (ACCESS_ONCE(rdp->qlen)) {
3288 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
3289 					   rsp->n_barrier_done);
3290 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3291 		} else {
3292 			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3293 					   rsp->n_barrier_done);
3294 		}
3295 	}
3296 	put_online_cpus();
3297 
3298 	/*
3299 	 * Now that each CPU that needs an rcu_barrier_callback() has one
3300 	 * posted and counted, remove the initial count.
3301 	 */
3302 	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3303 		complete(&rsp->barrier_completion);
3304 
3305 	/* Increment ->n_barrier_done to prevent duplicate work. */
3306 	smp_mb(); /* Keep increment after above mechanism. */
3307 	ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
3308 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
3309 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
3310 	smp_mb(); /* Keep increment before caller's subsequent code. */
3311 
3312 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3313 	wait_for_completion(&rsp->barrier_completion);
3314 
3315 	/* Other rcu_barrier() invocations can now safely proceed. */
3316 	mutex_unlock(&rsp->barrier_mutex);
3317 }
3318 
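/*
 * The count-starts-at-one trick in _rcu_barrier() is a general idiom
 * for waiting on a dynamically discovered set of completions: the bias
 * of one keeps the counter from reaching zero before all work has been
 * posted.  A minimal standalone sketch, where post_work_on(), count,
 * and done are hypothetical.  Each posted work item finishes with
 * atomic_dec_and_test(&count) and, if that returns true, complete(&done):
 *
 *	atomic_set(&count, 1);
 *	init_completion(&done);
 *	for_each_online_cpu(cpu) {
 *		atomic_inc(&count);
 *		post_work_on(cpu);
 *	}
 *	if (atomic_dec_and_test(&count))
 *		complete(&done);
 *	wait_for_completion(&done);
 */
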
3319 /**
3320  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
3321  */
3322 void rcu_barrier_bh(void)
3323 {
3324 	_rcu_barrier(&rcu_bh_state);
3325 }
3326 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3327 
3328 /**
3329  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
3330  */
3331 void rcu_barrier_sched(void)
3332 {
3333 	_rcu_barrier(&rcu_sched_state);
3334 }
3335 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3336 
3337 /*
3338  * Do boot-time initialization of a CPU's per-CPU RCU data.
3339  */
3340 static void __init
3341 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3342 {
3343 	unsigned long flags;
3344 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3345 	struct rcu_node *rnp = rcu_get_root(rsp);
3346 
3347 	/* Set up local state, ensuring consistent view of global state. */
3348 	raw_spin_lock_irqsave(&rnp->lock, flags);
3349 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
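	/*
	 * For example (illustrative numbers): cpu 21 in a leaf rcu_node
	 * spanning CPUs 16-31 has ->grplo == 16, and so gets
	 * ->grpmask == 1UL << 5 == 0x20.
	 */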
3350 	init_callback_list(rdp);
3351 	rdp->qlen_lazy = 0;
3352 	ACCESS_ONCE(rdp->qlen) = 0;
3353 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3354 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3355 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
3356 	rdp->cpu = cpu;
3357 	rdp->rsp = rsp;
3358 	rcu_boot_init_nocb_percpu_data(rdp);
3359 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
3360 }
3361 
3362 /*
3363  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
3364  * offline event can be happening at a given time.  Note also that we
3365  * can accept some slop in the rsp->completed access due to the fact
3366  * that this CPU cannot possibly have any RCU callbacks in flight yet.
3367  */
3368 static void
3369 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3370 {
3371 	unsigned long flags;
3372 	unsigned long mask;
3373 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3374 	struct rcu_node *rnp = rcu_get_root(rsp);
3375 
3376 	/* Exclude new grace periods. */
3377 	mutex_lock(&rsp->onoff_mutex);
3378 
3379 	/* Set up local state, ensuring consistent view of global state. */
3380 	raw_spin_lock_irqsave(&rnp->lock, flags);
3381 	rdp->beenonline = 1;	 /* We have now been online. */
3382 	rdp->qlen_last_fqs_check = 0;
3383 	rdp->n_force_qs_snap = rsp->n_force_qs;
3384 	rdp->blimit = blimit;
3385 	init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
3386 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3387 	rcu_sysidle_init_percpu_data(rdp->dynticks);
3388 	atomic_set(&rdp->dynticks->dynticks,
3389 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
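	/*
	 * The update above forces ->dynticks to an odd value, marking
	 * this CPU as non-idle from RCU's point of view (an even value
	 * denotes dyntick-idle).
	 */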
3390 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
3391 
3392 	/* Add CPU to rcu_node bitmasks. */
3393 	rnp = rdp->mynode;
3394 	mask = rdp->grpmask;
3395 	do {
3396 		/* Exclude any attempts to start a new GP on small systems. */
3397 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
3398 		rnp->qsmaskinit |= mask;
3399 		mask = rnp->grpmask;
3400 		if (rnp == rdp->mynode) {
3401 			/*
3402 			 * If there is a grace period in progress, we will
3403 			 * set up to wait for it next time we run the
3404 			 * RCU core code.
3405 			 */
3406 			rdp->gpnum = rnp->completed;
3407 			rdp->completed = rnp->completed;
3408 			rdp->passed_quiesce = 0;
3409 			rdp->qs_pending = 0;
3410 			trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3411 		}
3412 		raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
3413 		rnp = rnp->parent;
3414 	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
3415 	local_irq_restore(flags);
3416 
3417 	mutex_unlock(&rsp->onoff_mutex);
3418 }
3419 
3420 static void rcu_prepare_cpu(int cpu)
3421 {
3422 	struct rcu_state *rsp;
3423 
3424 	for_each_rcu_flavor(rsp)
3425 		rcu_init_percpu_data(cpu, rsp);
3426 }
3427 
3428 /*
3429  * Handle CPU online/offline notification events.
3430  */
3431 static int rcu_cpu_notify(struct notifier_block *self,
3432 				    unsigned long action, void *hcpu)
3433 {
3434 	long cpu = (long)hcpu;
3435 	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3436 	struct rcu_node *rnp = rdp->mynode;
3437 	struct rcu_state *rsp;
3438 
3439 	trace_rcu_utilization(TPS("Start CPU hotplug"));
3440 	switch (action) {
3441 	case CPU_UP_PREPARE:
3442 	case CPU_UP_PREPARE_FROZEN:
3443 		rcu_prepare_cpu(cpu);
3444 		rcu_prepare_kthreads(cpu);
3445 		break;
3446 	case CPU_ONLINE:
3447 	case CPU_DOWN_FAILED:
3448 		rcu_boost_kthread_setaffinity(rnp, -1);
3449 		break;
3450 	case CPU_DOWN_PREPARE:
3451 		rcu_boost_kthread_setaffinity(rnp, cpu);
3452 		break;
3453 	case CPU_DYING:
3454 	case CPU_DYING_FROZEN:
3455 		for_each_rcu_flavor(rsp)
3456 			rcu_cleanup_dying_cpu(rsp);
3457 		break;
3458 	case CPU_DEAD:
3459 	case CPU_DEAD_FROZEN:
3460 	case CPU_UP_CANCELED:
3461 	case CPU_UP_CANCELED_FROZEN:
3462 		for_each_rcu_flavor(rsp)
3463 			rcu_cleanup_dead_cpu(cpu, rsp);
3464 		break;
3465 	default:
3466 		break;
3467 	}
3468 	trace_rcu_utilization(TPS("End CPU hotplug"));
3469 	return NOTIFY_OK;
3470 }
3471 
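/*
 * For reference (simplified, ignoring failure paths): onlining a CPU
 * delivers CPU_UP_PREPARE and then CPU_ONLINE; offlining delivers
 * CPU_DOWN_PREPARE, then CPU_DYING on the outgoing CPU with the
 * machine stopped, and finally CPU_DEAD.
 */
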
3472 static int rcu_pm_notify(struct notifier_block *self,
3473 			 unsigned long action, void *hcpu)
3474 {
3475 	switch (action) {
3476 	case PM_HIBERNATION_PREPARE:
3477 	case PM_SUSPEND_PREPARE:
3478 		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3479 			rcu_expedited = 1;
3480 		break;
3481 	case PM_POST_HIBERNATION:
3482 	case PM_POST_SUSPEND:
3483 		rcu_expedited = 0;
3484 		break;
3485 	default:
3486 		break;
3487 	}
3488 	return NOTIFY_OK;
3489 }
3490 
3491 /*
3492  * Spawn the kthread that handles this RCU flavor's grace periods.
3493  */
3494 static int __init rcu_spawn_gp_kthread(void)
3495 {
3496 	unsigned long flags;
3497 	struct rcu_node *rnp;
3498 	struct rcu_state *rsp;
3499 	struct task_struct *t;
3500 
3501 	for_each_rcu_flavor(rsp) {
3502 		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
3503 		BUG_ON(IS_ERR(t));
3504 		rnp = rcu_get_root(rsp);
3505 		raw_spin_lock_irqsave(&rnp->lock, flags);
3506 		rsp->gp_kthread = t;
3507 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
3508 		rcu_spawn_nocb_kthreads(rsp);
3509 	}
3510 	return 0;
3511 }
3512 early_initcall(rcu_spawn_gp_kthread);
3513 
3514 /*
3515  * This function is invoked towards the end of the scheduler's initialization
3516  * process.  Before this is called, the idle task might contain
3517  * RCU read-side critical sections (during which time, this idle
3518  * task is booting the system).  After this function is called, the
3519  * idle tasks are prohibited from containing RCU read-side critical
3520  * sections.  This function also enables RCU lockdep checking.
3521  */
3522 void rcu_scheduler_starting(void)
3523 {
3524 	WARN_ON(num_online_cpus() != 1);
3525 	WARN_ON(nr_context_switches() > 0);
3526 	rcu_scheduler_active = 1;
3527 }
3528 
3529 /*
3530  * Compute the per-level fanout, either using the exact fanout specified
3531  * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
3532  */
3533 #ifdef CONFIG_RCU_FANOUT_EXACT
3534 static void __init rcu_init_levelspread(struct rcu_state *rsp)
3535 {
3536 	int i;
3537 
3538 	rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
3539 	for (i = rcu_num_lvls - 2; i >= 0; i--)
3540 		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
3541 }
3542 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
3543 static void __init rcu_init_levelspread(struct rcu_state *rsp)
3544 {
3545 	int ccur;
3546 	int cprv;
3547 	int i;
3548 
3549 	cprv = nr_cpu_ids;
3550 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3551 		ccur = rsp->levelcnt[i];
3552 		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3553 		cprv = ccur;
3554 	}
3555 }
3556 #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
3557 
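/*
 * Worked example for the balanced case, with illustrative numbers:
 * given nr_cpu_ids == 96 and ->levelcnt == { 1, 6 }, the loop above
 * computes levelspread[1] == (96 + 6 - 1) / 6 == 16 (CPUs per leaf)
 * and then levelspread[0] == (6 + 1 - 1) / 1 == 6 (leaves under the
 * root).
 */
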
3558 /*
3559  * Helper function for rcu_init() that initializes one rcu_state structure.
3560  */
3561 static void __init rcu_init_one(struct rcu_state *rsp,
3562 		struct rcu_data __percpu *rda)
3563 {
3564 	static const char * const buf[] = {
3565 		"rcu_node_0",
3566 		"rcu_node_1",
3567 		"rcu_node_2",
3568 		"rcu_node_3" };  /* Match MAX_RCU_LVLS */
3569 	static const char * const fqs[] = {
3570 		"rcu_node_fqs_0",
3571 		"rcu_node_fqs_1",
3572 		"rcu_node_fqs_2",
3573 		"rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
3574 	static u8 fl_mask = 0x1;
3575 	int cpustride = 1;
3576 	int i;
3577 	int j;
3578 	struct rcu_node *rnp;
3579 
3580 	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3581 
3582 	/* Silence gcc 4.8 warning about array index out of range. */
3583 	if (rcu_num_lvls > RCU_NUM_LVLS)
3584 		panic("rcu_init_one: rcu_num_lvls overflow");
3585 
3586 	/* Initialize the level-tracking arrays. */
3587 
3588 	for (i = 0; i < rcu_num_lvls; i++)
3589 		rsp->levelcnt[i] = num_rcu_lvl[i];
3590 	for (i = 1; i < rcu_num_lvls; i++)
3591 		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3592 	rcu_init_levelspread(rsp);
3593 	rsp->flavor_mask = fl_mask;
3594 	fl_mask <<= 1;
3595 
3596 	/* Initialize the elements themselves, starting from the leaves. */
3597 
3598 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
3599 		cpustride *= rsp->levelspread[i];
3600 		rnp = rsp->level[i];
3601 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
3602 			raw_spin_lock_init(&rnp->lock);
3603 			lockdep_set_class_and_name(&rnp->lock,
3604 						   &rcu_node_class[i], buf[i]);
3605 			raw_spin_lock_init(&rnp->fqslock);
3606 			lockdep_set_class_and_name(&rnp->fqslock,
3607 						   &rcu_fqs_class[i], fqs[i]);
3608 			rnp->gpnum = rsp->gpnum;
3609 			rnp->completed = rsp->completed;
3610 			rnp->qsmask = 0;
3611 			rnp->qsmaskinit = 0;
3612 			rnp->grplo = j * cpustride;
3613 			rnp->grphi = (j + 1) * cpustride - 1;
3614 			if (rnp->grphi >= nr_cpu_ids)
3615 				rnp->grphi = nr_cpu_ids - 1;
3616 			if (i == 0) {
3617 				rnp->grpnum = 0;
3618 				rnp->grpmask = 0;
3619 				rnp->parent = NULL;
3620 			} else {
3621 				rnp->grpnum = j % rsp->levelspread[i - 1];
3622 				rnp->grpmask = 1UL << rnp->grpnum;
3623 				rnp->parent = rsp->level[i - 1] +
3624 					      j / rsp->levelspread[i - 1];
3625 			}
3626 			rnp->level = i;
3627 			INIT_LIST_HEAD(&rnp->blkd_tasks);
3628 			rcu_init_one_nocb(rnp);
3629 		}
3630 	}
3631 
3632 	rsp->rda = rda;
3633 	init_waitqueue_head(&rsp->gp_wq);
3634 	rnp = rsp->level[rcu_num_lvls - 1];
3635 	for_each_possible_cpu(i) {
3636 		while (i > rnp->grphi)
3637 			rnp++;
3638 		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3639 		rcu_boot_init_percpu_data(i, rsp);
3640 	}
3641 	list_add(&rsp->flavors, &rcu_struct_flavors);
3642 }
3643 
3644 /*
3645  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3646  * replace the definitions in tree.h because those are needed to size
3647  * the ->node array in the rcu_state structure.
3648  */
3649 static void __init rcu_init_geometry(void)
3650 {
3651 	ulong d;
3652 	int i;
3653 	int j;
3654 	int n = nr_cpu_ids;
3655 	int rcu_capacity[MAX_RCU_LVLS + 1];
3656 
3657 	/*
3658 	 * Initialize any unspecified boot parameters.
3659 	 * The default values of jiffies_till_first_fqs and
3660 	 * jiffies_till_next_fqs start from the RCU_JIFFIES_TILL_FORCE_QS
3661 	 * value, which is a function of HZ, plus one jiffy for each
3662 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3663 	 */
3664 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3665 	if (jiffies_till_first_fqs == ULONG_MAX)
3666 		jiffies_till_first_fqs = d;
3667 	if (jiffies_till_next_fqs == ULONG_MAX)
3668 		jiffies_till_next_fqs = d;
3669 
3670 	/* If the compile-time values are accurate, just leave. */
3671 	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
3672 	    nr_cpu_ids == NR_CPUS)
3673 		return;
3674 	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
3675 		rcu_fanout_leaf, nr_cpu_ids);
3676 
3677 	/*
3678 	 * Compute the number of CPUs that can be handled by an rcu_node tree
3679 	 * with the given number of levels.  Setting rcu_capacity[0] makes
3680 	 * some of the arithmetic easier.
3681 	 */
3682 	rcu_capacity[0] = 1;
3683 	rcu_capacity[1] = rcu_fanout_leaf;
3684 	for (i = 2; i <= MAX_RCU_LVLS; i++)
3685 		rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
3686 
3687 	/*
3688 	 * The boot-time rcu_fanout_leaf parameter is only permitted
3689 	 * to increase the leaf-level fanout, not decrease it.  Of course,
3690 	 * the leaf-level fanout cannot exceed the number of bits in
3691 	 * the rcu_node masks.  Finally, the tree must be able to accommodate
3692 	 * the configured number of CPUs.  Complain and fall back to the
3693 	 * compile-time values if these limits are exceeded.
3694 	 */
3695 	if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
3696 	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
3697 	    n > rcu_capacity[MAX_RCU_LVLS]) {
3698 		WARN_ON(1);
3699 		return;
3700 	}
3701 
3702 	/* Calculate the number of rcu_nodes at each level of the tree. */
3703 	for (i = 1; i <= MAX_RCU_LVLS; i++)
3704 		if (n <= rcu_capacity[i]) {
3705 			for (j = 0; j <= i; j++)
3706 				num_rcu_lvl[j] =
3707 					DIV_ROUND_UP(n, rcu_capacity[i - j]);
3708 			rcu_num_lvls = i;
3709 			for (j = i + 1; j <= MAX_RCU_LVLS; j++)
3710 				num_rcu_lvl[j] = 0;
3711 			break;
3712 		}
3713 
3714 	/* Calculate the total number of rcu_node structures. */
3715 	rcu_num_nodes = 0;
3716 	for (i = 0; i <= MAX_RCU_LVLS; i++)
3717 		rcu_num_nodes += num_rcu_lvl[i];
3718 	rcu_num_nodes -= n;
3719 }
3720 
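/*
 * Worked example with illustrative numbers: for n == nr_cpu_ids == 96,
 * rcu_fanout_leaf == 16, and CONFIG_RCU_FANOUT == 64, rcu_capacity[]
 * is { 1, 16, 1024, ... }, so the first adequate level is i == 2.
 * This yields num_rcu_lvl[] == { 1, 6, 96 }, rcu_num_lvls == 2, and
 * rcu_num_nodes == 1 + 6 + 96 - 96 == 7: a root rcu_node fanning out
 * to six leaves.
 */
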
3721 void __init rcu_init(void)
3722 {
3723 	int cpu;
3724 
3725 	rcu_bootup_announce();
3726 	rcu_init_geometry();
3727 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
3728 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
3729 	__rcu_init_preempt();
3730 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
3731 
3732 	/*
3733 	 * We don't need protection against CPU-hotplug here because
3734 	 * this is called early in boot, before either interrupts
3735 	 * or the scheduler are operational.
3736 	 */
3737 	cpu_notifier(rcu_cpu_notify, 0);
3738 	pm_notifier(rcu_pm_notify, 0);
3739 	for_each_online_cpu(cpu)
3740 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
3741 }
3742 
3743 #include "tree_plugin.h"
3744