/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define RCU_NUM_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_NODES	      NUM_RCU_LVL_0
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
#elif NR_CPUS <= RCU_FANOUT_2
#  define RCU_NUM_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
#elif NR_CPUS <= RCU_FANOUT_3
#  define RCU_NUM_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
#elif NR_CPUS <= RCU_FANOUT_4
#  define RCU_NUM_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
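
/*
 * Worked example (hypothetical configuration, for illustration only):
 * with RCU_FANOUT_LEAF = 16, RCU_FANOUT = 64, and NR_CPUS = 4096:
 *
 *	RCU_FANOUT_1 = 16, RCU_FANOUT_2 = 1024, RCU_FANOUT_3 = 65536
 *
 * Because 4096 <= RCU_FANOUT_3, the #elif ladder above picks
 * RCU_NUM_LVLS = 3, giving:
 *
 *	NUM_RCU_LVL_0 = 1
 *	NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 1024) = 4
 *	NUM_RCU_LVL_2 = DIV_ROUND_UP(4096, 16)   = 256
 *	NUM_RCU_NODES = 1 + 4 + 256 = 261
 */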

extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting; /* Track irq/process nesting level. */
				    /* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
	atomic_t dynticks;	    /* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
				    /* irq/process nesting level from idle. */
	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
				    /*  "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
				    /* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;		    /* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
				    /* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
				    /* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
				    /* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
				    /* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
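
/*
 * The ->dynticks counter follows an even/odd convention: its value is
 * even when the CPU is in dynticks-idle mode and odd otherwise.  A
 * minimal sketch of a check based on that convention (illustrative
 * only; the actual sampling code lives in tree.c and supplies the
 * memory ordering this sketch omits):
 *
 *	static bool example_cpu_is_dynticks_idle(struct rcu_dynticks *rdtp)
 *	{
 *		return !(atomic_read(&rdtp->dynticks) & 0x1);
 *	}
 */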

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's ->completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure; otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
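
/*
 * Illustrative (hypothetical) use of leaf_node_cpu_bit(): testing
 * whether a given CPU still owes the current grace period a quiescent
 * state.  Real callers would hold the rcu_node's ->lock; this sketch
 * omits that.
 *
 *	static bool example_cpu_blocks_gp(struct rcu_node *rnp, int cpu)
 *	{
 *		return !!(rnp->qsmask & leaf_node_cpu_bit(rnp, cpu));
 *	}
 */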

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
	     cpu <= rnp->grphi; \
	     cpu = cpumask_next((cpu), cpu_possible_mask))
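
/*
 * Putting the iterators together: a hedged sketch of a scan over every
 * possible CPU in every leaf rcu_node of a flavor (hypothetical helper,
 * not part of the actual RCU implementation):
 *
 *	static void example_scan_leaves(struct rcu_state *rsp)
 *	{
 *		struct rcu_node *rnp;
 *		int cpu;
 *
 *		rcu_for_each_leaf_node(rsp, rnp)
 *			for_each_leaf_node_possible_cpu(rnp, cpu)
 *				pr_info("node %d..%d: cpu %d\n",
 *					rnp->grplo, rnp->grphi, cpu);
 *	}
 */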

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
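
/*
 * The point of the union is that ->s aggregates both bytes, so a single
 * load checks whether either grace-period type still needs a quiescent
 * state from this CPU.  Hedged usage sketch (field names as above):
 *
 *	rdp->cpu_no_qs.b.norm = true;	// Normal GP needs a QS.
 *	rdp->cpu_no_qs.b.exp = true;	// Expedited GP needs one too.
 *	if (rdp->cpu_no_qs.s)		// Either one still outstanding?
 *		...			// This CPU still owes a QS.
 */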

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries whose batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries whose batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 *
	 * A minimal enqueue sketch appears after this structure.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	atomic_long_t exp_workdone0;	/* # done by workqueue. */
	atomic_long_t exp_workdone1;	/* # done by others #1. */
	atomic_long_t exp_workdone2;	/* # done by others #2. */
	atomic_long_t exp_workdone3;	/* # done by others #3. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};
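
/*
 * A minimal enqueue sketch for the callback list described above
 * (illustrative only; the real logic lives in __call_rcu() in tree.c
 * and also handles counts, lazy accounting, and interrupt disabling):
 *
 *	static void example_enqueue(struct rcu_data *rdp, struct rcu_head *rhp)
 *	{
 *		rhp->next = NULL;
 *		*rdp->nxttail[RCU_NEXT_TAIL] = rhp;	  // Append at tail.
 *		rdp->nxttail[RCU_NEXT_TAIL] = &rhp->next; // Advance tail.
 *	}
 */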

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
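
/*
 * Hedged usage sketch for rcu_wait(): a kthread sleeps until the
 * condition becomes true, re-checking after each wakeup.  For example,
 * a boost kthread might wait for boostable tasks in this style
 * (condition chosen for illustration):
 *
 *	rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 */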

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.  A worked layout example follows this
 * structure.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	call_rcu_func_t call;			/* call_rcu() flavor. */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};
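
/*
 * Worked layout example, continuing the hypothetical three-level
 * configuration above (NUM_RCU_LVL_INIT = { 1, 4, 256 }):
 *
 *	->node[0]			root		->level[0] = &node[0]
 *	->node[1] ... ->node[4]		interior	->level[1] = &node[1]
 *	->node[5] ... ->node[260]	leaves		->level[2] = &node[5]
 *
 * The leaf-node iterators above rely on exactly this dense ordering:
 * ->level[rcu_num_lvls - 1] points at the first leaf, and the leaves
 * run contiguously through ->node[rcu_num_nodes - 1].
 */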

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */

#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
#endif /* #ifndef RCU_TREE_NONCORE */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
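
/*
 * Hedged sketch of for_each_rcu_flavor() in use (hypothetical helper):
 *
 *	static int example_count_flavors(void)
 *	{
 *		struct rcu_state *rsp;
 *		int n = 0;
 *
 *		for_each_rcu_flavor(rsp)
 *			n++;
 *		return n;	// e.g. rcu_sched, rcu_bh, maybe rcu_preempt.
 *	}
 */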

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_PPC
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_PPC */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_PPC */

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}
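
/*
 * Hedged usage sketch for the wrappers above (hypothetical helper; the
 * real quiescent-state reporting in tree.c does considerably more):
 *
 *	static void example_clear_qsmask(struct rcu_node *rnp,
 *					 unsigned long mask)
 *	{
 *		unsigned long flags;
 *
 *		// Acquire includes smp_mb__after_unlock_lock().
 *		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *		rnp->qsmask &= ~mask;	// These CPUs/groups passed a QS.
 *		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *	}
 */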
758