/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
	unsigned long rew_s;
	struct work_struct rew_work;
};
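
/*
 * Usage sketch (illustrative, not part of this header; the exact callers
 * live in tree_exp.h and vary by kernel version): the expedited
 * grace-period machinery records the target sequence number and queues
 * the handler, approximately:
 *
 *	rnp->rew.rew_s = s;
 *	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
 *	queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
 */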

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gp_seq;	/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed; /* Track furthest future GP request. */
	unsigned long completedqs; /* All QSes done for this node. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long ofl_seq;	/* CPU-hotplug operation sequence count. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long cbovldmask;
				/* CPUs experiencing callback overload. */
	unsigned long ffmask;	/* Fully functional CPUs. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU here. */
	int	grphi;		/* highest-numbered CPU here. */
	u8	grpnum;		/* group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there can be no such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;	/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;	/* Need to flush workitem? */
} ____cacheline_internodealigned_in_smp;
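
/*
 * Usage sketch (illustrative, not part of this header): ->lock is marked
 * __private, so RCU code acquires it through the wrappers defined in
 * kernel/rcu/rcu.h rather than directly, e.g.:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	...update fields guarded by ->lock, such as ->qsmask...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *
 * The _rcu_node wrappers also supply smp_mb__after_unlock_lock() on
 * architectures that need it for RCU's ordering guarantees.
 */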

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
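
/*
 * Worked example (illustrative): for a leaf rcu_node covering CPUs 16-31
 * (->grplo == 16), leaf_node_cpu_bit(rnp, 19) expands to BIT(19 - 16),
 * i.e. 0x8, the bit representing CPU 19 in that node's masks.
 */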

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
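
/*
 * Usage sketch (illustrative): the union lets hot paths test both needs
 * with a single load, while slow paths update each need individually:
 *
 *	rdp->cpu_no_qs.b.norm = true;	(set the normal-GP need)
 *	if (rdp->cpu_no_qs.s)		(nonzero if .b.norm or .b.exp set)
 *		...a quiescent state is still required...
 */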

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
	long dynticks_nesting;		/* Track process nesting level. */
	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	atomic_t nocb_lock_contended;	/* Contention experienced. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
					    /* spawning */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */

	int cpu;
};
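
/*
 * Access sketch (illustrative): tree.c instantiates one rcu_data per CPU
 * via DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data), so a given
 * CPU's state is reached with the usual per-CPU accessors:
 *
 *	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 *	struct rcu_data *myrdp = this_cpu_ptr(&rcu_data);
 */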

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE		2
#define RCU_NOCB_WAKE_FORCE	3

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
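
/*
 * Worked example: at HZ=100 or HZ=250 this evaluates to 1 jiffy, at HZ=300
 * to 2 jiffies, and at HZ=1000 to 1 + 1 + 1 = 3 jiffies, keeping the
 * default delay between quiescent-state forcing attempts at roughly
 * 3-10 milliseconds across common HZ values.
 */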

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
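
/*
 * Usage sketch (illustrative; the flag name is made up): rcu_wait() parks
 * an RCU kthread until its wake condition holds, re-evaluating the
 * condition after every wakeup so spurious wakeups are harmless:
 *
 *	rcu_wait(READ_ONCE(my_wake_flag) || kthread_should_stop());
 */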

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
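
/*
 * Worked example (illustrative): with CONFIG_RCU_FANOUT=16,
 * CONFIG_RCU_FANOUT_LEAF=16, and 64 possible CPUs, the tree has two
 * levels: ->node[0] is the root, and ->node[1] through ->node[4] are the
 * four leaves, each covering 16 CPUs.
 */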
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gp_seq;			/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long gp_end;			/* Time at which last GP ended, */
						/*  also in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */
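
/*
 * Usage sketch (illustrative, approximating tree.c): quiescent-state
 * forcing is requested by setting the flag under the root rcu_node's lock
 * and then waking the grace-period kthread:
 *
 *	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
 *	rcu_gp_kthread_wake();
 */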

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF     3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT      4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows userspace
 * tracing tools to map the string's address back to its contents.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static void __init rcu_spawn_boost_kthreads(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)			\
do {								\
	local_irq_save(flags);					\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		raw_spin_lock(&(rdp)->nocb_lock);		\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
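
/*
 * Usage sketch (illustrative): this macro pairs with
 * rcu_nocb_unlock_irqrestore(), declared above, so callers need not care
 * whether the CPU's callbacks are offloaded:
 *
 *	unsigned long flags;
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	...manipulate rdp->cblist...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 *
 * Without CONFIG_RCU_NOCB_CPU this reduces to plain IRQ disabling and
 * restoring.
 */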

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
static void rcu_dynticks_task_trace_enter(void);
static void rcu_dynticks_task_trace_exit(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);