1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 /* 3 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 4 * Internal non-public definitions. 5 * 6 * Copyright IBM Corporation, 2008 7 * 8 * Author: Ingo Molnar <mingo@elte.hu> 9 * Paul E. McKenney <paulmck@linux.ibm.com> 10 */ 11 12 #include <linux/cache.h> 13 #include <linux/spinlock.h> 14 #include <linux/rtmutex.h> 15 #include <linux/threads.h> 16 #include <linux/cpumask.h> 17 #include <linux/seqlock.h> 18 #include <linux/swait.h> 19 #include <linux/rcu_node_tree.h> 20 21 #include "rcu_segcblist.h" 22 23 /* Communicate arguments to a workqueue handler. */ 24 struct rcu_exp_work { 25 unsigned long rew_s; 26 struct work_struct rew_work; 27 }; 28 29 /* RCU's kthread states for tracing. */ 30 #define RCU_KTHREAD_STOPPED 0 31 #define RCU_KTHREAD_RUNNING 1 32 #define RCU_KTHREAD_WAITING 2 33 #define RCU_KTHREAD_OFFCPU 3 34 #define RCU_KTHREAD_YIELDING 4 35 #define RCU_KTHREAD_MAX 4 36 37 /* 38 * Definition for node within the RCU grace-period-detection hierarchy. 39 */ 40 struct rcu_node { 41 raw_spinlock_t __private lock; /* Root rcu_node's lock protects */ 42 /* some rcu_state fields as well as */ 43 /* following. */ 44 unsigned long gp_seq; /* Track rsp->gp_seq. */ 45 unsigned long gp_seq_needed; /* Track furthest future GP request. */ 46 unsigned long completedqs; /* All QSes done for this node. */ 47 unsigned long qsmask; /* CPUs or groups that need to switch in */ 48 /* order for current grace period to proceed.*/ 49 /* In leaf rcu_node, each bit corresponds to */ 50 /* an rcu_data structure, otherwise, each */ 51 /* bit corresponds to a child rcu_node */ 52 /* structure. */ 53 unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */ 54 unsigned long qsmaskinit; 55 /* Per-GP initial value for qsmask. */ 56 /* Initialized from ->qsmaskinitnext at the */ 57 /* beginning of each grace period. */ 58 unsigned long qsmaskinitnext; 59 /* Online CPUs for next grace period. */ 60 unsigned long expmask; /* CPUs or groups that need to check in */ 61 /* to allow the current expedited GP */ 62 /* to complete. */ 63 unsigned long expmaskinit; 64 /* Per-GP initial values for expmask. */ 65 /* Initialized from ->expmaskinitnext at the */ 66 /* beginning of each expedited GP. */ 67 unsigned long expmaskinitnext; 68 /* Online CPUs for next expedited GP. */ 69 /* Any CPU that has ever been online will */ 70 /* have its bit set. */ 71 unsigned long cbovldmask; 72 /* CPUs experiencing callback overload. */ 73 unsigned long ffmask; /* Fully functional CPUs. */ 74 unsigned long grpmask; /* Mask to apply to parent qsmask. */ 75 /* Only one bit will be set in this mask. */ 76 int grplo; /* lowest-numbered CPU here. */ 77 int grphi; /* highest-numbered CPU here. */ 78 u8 grpnum; /* group number for next level up. */ 79 u8 level; /* root is at level 0. */ 80 bool wait_blkd_tasks;/* Necessary to wait for blocked tasks to */ 81 /* exit RCU read-side critical sections */ 82 /* before propagating offline up the */ 83 /* rcu_node tree? */ 84 struct rcu_node *parent; 85 struct list_head blkd_tasks; 86 /* Tasks blocked in RCU read-side critical */ 87 /* section. Tasks are placed at the head */ 88 /* of this list and age towards the tail. */ 89 struct list_head *gp_tasks; 90 /* Pointer to the first task blocking the */ 91 /* current grace period, or NULL if there */ 92 /* is no such task. */ 93 struct list_head *exp_tasks; 94 /* Pointer to the first task blocking the */ 95 /* current expedited grace period, or NULL */ 96 /* if there is no such task. If there */ 97 /* is no current expedited grace period, */ 98 /* then there can cannot be any such task. */ 99 struct list_head *boost_tasks; 100 /* Pointer to first task that needs to be */ 101 /* priority boosted, or NULL if no priority */ 102 /* boosting is needed for this rcu_node */ 103 /* structure. If there are no tasks */ 104 /* queued on this rcu_node structure that */ 105 /* are blocking the current grace period, */ 106 /* there can be no such task. */ 107 struct rt_mutex boost_mtx; 108 /* Used only for the priority-boosting */ 109 /* side effect, not as a lock. */ 110 unsigned long boost_time; 111 /* When to start boosting (jiffies). */ 112 struct task_struct *boost_kthread_task; 113 /* kthread that takes care of priority */ 114 /* boosting for this rcu_node structure. */ 115 unsigned int boost_kthread_status; 116 /* State of boost_kthread_task for tracing. */ 117 #ifdef CONFIG_RCU_NOCB_CPU 118 struct swait_queue_head nocb_gp_wq[2]; 119 /* Place for rcu_nocb_kthread() to wait GP. */ 120 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 121 raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp; 122 123 spinlock_t exp_lock ____cacheline_internodealigned_in_smp; 124 unsigned long exp_seq_rq; 125 wait_queue_head_t exp_wq[4]; 126 struct rcu_exp_work rew; 127 bool exp_need_flush; /* Need to flush workitem? */ 128 } ____cacheline_internodealigned_in_smp; 129 130 /* 131 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and 132 * are indexed relative to this interval rather than the global CPU ID space. 133 * This generates the bit for a CPU in node-local masks. 134 */ 135 #define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo)) 136 137 /* 138 * Union to allow "aggregate OR" operation on the need for a quiescent 139 * state by the normal and expedited grace periods. 140 */ 141 union rcu_noqs { 142 struct { 143 u8 norm; 144 u8 exp; 145 } b; /* Bits. */ 146 u16 s; /* Set of bits, aggregate OR here. */ 147 }; 148 149 /* Per-CPU data for read-copy update. */ 150 struct rcu_data { 151 /* 1) quiescent-state and grace-period handling : */ 152 unsigned long gp_seq; /* Track rsp->gp_seq counter. */ 153 unsigned long gp_seq_needed; /* Track furthest future GP request. */ 154 union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ 155 bool core_needs_qs; /* Core waits for quiesc state. */ 156 bool beenonline; /* CPU online at least once. */ 157 bool gpwrap; /* Possible ->gp_seq wrap. */ 158 bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */ 159 bool cpu_started; /* RCU watching this onlining CPU. */ 160 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ 161 unsigned long grpmask; /* Mask to apply to leaf qsmask. */ 162 unsigned long ticks_this_gp; /* The number of scheduling-clock */ 163 /* ticks this CPU has handled */ 164 /* during and after the last grace */ 165 /* period it is aware of. */ 166 struct irq_work defer_qs_iw; /* Obtain later scheduler attention. */ 167 bool defer_qs_iw_pending; /* Scheduler attention pending? */ 168 struct work_struct strict_work; /* Schedule readers for strict GPs. */ 169 170 /* 2) batch handling */ 171 struct rcu_segcblist cblist; /* Segmented callback list, with */ 172 /* different callbacks waiting for */ 173 /* different grace periods. */ 174 long qlen_last_fqs_check; 175 /* qlen at last check for QS forcing */ 176 unsigned long n_cbs_invoked; /* # callbacks invoked since boot. */ 177 unsigned long n_force_qs_snap; 178 /* did other CPU force QS recently? */ 179 long blimit; /* Upper limit on a processed batch */ 180 181 /* 3) dynticks interface. */ 182 int dynticks_snap; /* Per-GP tracking for dynticks. */ 183 long dynticks_nesting; /* Track process nesting level. */ 184 long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ 185 atomic_t dynticks; /* Even value for idle, else odd. */ 186 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */ 187 bool rcu_urgent_qs; /* GP old need light quiescent state. */ 188 bool rcu_forced_tick; /* Forced tick to provide QS. */ 189 bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */ 190 #ifdef CONFIG_RCU_FAST_NO_HZ 191 unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */ 192 unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */ 193 int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ 194 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 195 196 /* 4) rcu_barrier(), OOM callbacks, and expediting. */ 197 struct rcu_head barrier_head; 198 int exp_dynticks_snap; /* Double-check need for IPI. */ 199 200 /* 5) Callback offloading. */ 201 #ifdef CONFIG_RCU_NOCB_CPU 202 struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */ 203 struct task_struct *nocb_gp_kthread; 204 raw_spinlock_t nocb_lock; /* Guard following pair of fields. */ 205 atomic_t nocb_lock_contended; /* Contention experienced. */ 206 int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ 207 struct timer_list nocb_timer; /* Enforce finite deferral. */ 208 unsigned long nocb_gp_adv_time; /* Last call_rcu() CB adv (jiffies). */ 209 210 /* The following fields are used by call_rcu, hence own cacheline. */ 211 raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp; 212 struct rcu_cblist nocb_bypass; /* Lock-contention-bypass CB list. */ 213 unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */ 214 unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */ 215 int nocb_nobypass_count; /* # ->cblist enqueues at ^^^ time. */ 216 217 /* The following fields are used by GP kthread, hence own cacheline. */ 218 raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp; 219 struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */ 220 u8 nocb_gp_sleep; /* Is the nocb GP thread asleep? */ 221 u8 nocb_gp_bypass; /* Found a bypass on last scan? */ 222 u8 nocb_gp_gp; /* GP to wait for on last scan? */ 223 unsigned long nocb_gp_seq; /* If so, ->gp_seq to wait for. */ 224 unsigned long nocb_gp_loops; /* # passes through wait code. */ 225 struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */ 226 bool nocb_cb_sleep; /* Is the nocb CB thread asleep? */ 227 struct task_struct *nocb_cb_kthread; 228 struct rcu_data *nocb_next_cb_rdp; 229 /* Next rcu_data in wakeup chain. */ 230 231 /* The following fields are used by CB kthread, hence new cacheline. */ 232 struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp; 233 /* GP rdp takes GP-end wakeups. */ 234 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 235 236 /* 6) RCU priority boosting. */ 237 struct task_struct *rcu_cpu_kthread_task; 238 /* rcuc per-CPU kthread or NULL. */ 239 unsigned int rcu_cpu_kthread_status; 240 char rcu_cpu_has_work; 241 242 /* 7) Diagnostic data, including RCU CPU stall warnings. */ 243 unsigned int softirq_snap; /* Snapshot of softirq activity. */ 244 /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */ 245 struct irq_work rcu_iw; /* Check for non-irq activity. */ 246 bool rcu_iw_pending; /* Is ->rcu_iw pending? */ 247 unsigned long rcu_iw_gp_seq; /* ->gp_seq associated with ->rcu_iw. */ 248 unsigned long rcu_ofl_gp_seq; /* ->gp_seq at last offline. */ 249 short rcu_ofl_gp_flags; /* ->gp_flags at last offline. */ 250 unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */ 251 short rcu_onl_gp_flags; /* ->gp_flags at last online. */ 252 unsigned long last_fqs_resched; /* Time of last rcu_resched(). */ 253 254 int cpu; 255 }; 256 257 /* Values for nocb_defer_wakeup field in struct rcu_data. */ 258 #define RCU_NOCB_WAKE_NOT 0 259 #define RCU_NOCB_WAKE 1 260 #define RCU_NOCB_WAKE_FORCE 2 261 262 #define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500)) 263 /* For jiffies_till_first_fqs and */ 264 /* and jiffies_till_next_fqs. */ 265 266 #define RCU_JIFFIES_FQS_DIV 256 /* Very large systems need more */ 267 /* delay between bouts of */ 268 /* quiescent-state forcing. */ 269 270 #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time to take */ 271 /* at least one scheduling clock */ 272 /* irq before ratting on them. */ 273 274 #define rcu_wait(cond) \ 275 do { \ 276 for (;;) { \ 277 set_current_state(TASK_INTERRUPTIBLE); \ 278 if (cond) \ 279 break; \ 280 schedule(); \ 281 } \ 282 __set_current_state(TASK_RUNNING); \ 283 } while (0) 284 285 /* 286 * RCU global state, including node hierarchy. This hierarchy is 287 * represented in "heap" form in a dense array. The root (first level) 288 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second 289 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]), 290 * and the third level in ->node[m+1] and following (->node[m+1] referenced 291 * by ->level[2]). The number of levels is determined by the number of 292 * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy" 293 * consisting of a single rcu_node. 294 */ 295 struct rcu_state { 296 struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ 297 struct rcu_node *level[RCU_NUM_LVLS + 1]; 298 /* Hierarchy levels (+1 to */ 299 /* shut bogus gcc warning) */ 300 int ncpus; /* # CPUs seen so far. */ 301 302 /* The following fields are guarded by the root rcu_node's lock. */ 303 304 u8 boost ____cacheline_internodealigned_in_smp; 305 /* Subject to priority boost. */ 306 unsigned long gp_seq; /* Grace-period sequence #. */ 307 unsigned long gp_max; /* Maximum GP duration in */ 308 /* jiffies. */ 309 struct task_struct *gp_kthread; /* Task for grace periods. */ 310 struct swait_queue_head gp_wq; /* Where GP task waits. */ 311 short gp_flags; /* Commands for GP task. */ 312 short gp_state; /* GP kthread sleep state. */ 313 unsigned long gp_wake_time; /* Last GP kthread wake. */ 314 unsigned long gp_wake_seq; /* ->gp_seq at ^^^. */ 315 316 /* End of fields guarded by root rcu_node's lock. */ 317 318 struct mutex barrier_mutex; /* Guards barrier fields. */ 319 atomic_t barrier_cpu_count; /* # CPUs waiting on. */ 320 struct completion barrier_completion; /* Wake at barrier end. */ 321 unsigned long barrier_sequence; /* ++ at start and end of */ 322 /* rcu_barrier(). */ 323 /* End of fields guarded by barrier_mutex. */ 324 325 struct mutex exp_mutex; /* Serialize expedited GP. */ 326 struct mutex exp_wake_mutex; /* Serialize wakeup. */ 327 unsigned long expedited_sequence; /* Take a ticket. */ 328 atomic_t expedited_need_qs; /* # CPUs left to check in. */ 329 struct swait_queue_head expedited_wq; /* Wait for check-ins. */ 330 int ncpus_snap; /* # CPUs seen last time. */ 331 u8 cbovld; /* Callback overload now? */ 332 u8 cbovldnext; /* ^ ^ next time? */ 333 334 unsigned long jiffies_force_qs; /* Time at which to invoke */ 335 /* force_quiescent_state(). */ 336 unsigned long jiffies_kick_kthreads; /* Time at which to kick */ 337 /* kthreads, if configured. */ 338 unsigned long n_force_qs; /* Number of calls to */ 339 /* force_quiescent_state(). */ 340 unsigned long gp_start; /* Time at which GP started, */ 341 /* but in jiffies. */ 342 unsigned long gp_end; /* Time last GP ended, again */ 343 /* in jiffies. */ 344 unsigned long gp_activity; /* Time of last GP kthread */ 345 /* activity in jiffies. */ 346 unsigned long gp_req_activity; /* Time of last GP request */ 347 /* in jiffies. */ 348 unsigned long jiffies_stall; /* Time at which to check */ 349 /* for CPU stalls. */ 350 unsigned long jiffies_resched; /* Time at which to resched */ 351 /* a reluctant CPU. */ 352 unsigned long n_force_qs_gpstart; /* Snapshot of n_force_qs at */ 353 /* GP start. */ 354 const char *name; /* Name of structure. */ 355 char abbr; /* Abbreviated name. */ 356 357 raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp; 358 /* Synchronize offline with */ 359 /* GP pre-initialization. */ 360 }; 361 362 /* Values for rcu_state structure's gp_flags field. */ 363 #define RCU_GP_FLAG_INIT 0x1 /* Need grace-period initialization. */ 364 #define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */ 365 #define RCU_GP_FLAG_OVLD 0x4 /* Experiencing callback overload. */ 366 367 /* Values for rcu_state structure's gp_state field. */ 368 #define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */ 369 #define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */ 370 #define RCU_GP_DONE_GPS 2 /* Wait done for grace-period start. */ 371 #define RCU_GP_ONOFF 3 /* Grace-period initialization hotplug. */ 372 #define RCU_GP_INIT 4 /* Grace-period initialization. */ 373 #define RCU_GP_WAIT_FQS 5 /* Wait for force-quiescent-state time. */ 374 #define RCU_GP_DOING_FQS 6 /* Wait done for force-quiescent-state time. */ 375 #define RCU_GP_CLEANUP 7 /* Grace-period cleanup started. */ 376 #define RCU_GP_CLEANED 8 /* Grace-period cleanup complete. */ 377 378 /* 379 * In order to export the rcu_state name to the tracing tools, it 380 * needs to be added in the __tracepoint_string section. 381 * This requires defining a separate variable tp_<sname>_varname 382 * that points to the string being used, and this will allow 383 * the tracing userspace tools to be able to decipher the string 384 * address to the matching string. 385 */ 386 #ifdef CONFIG_PREEMPT_RCU 387 #define RCU_ABBR 'p' 388 #define RCU_NAME_RAW "rcu_preempt" 389 #else /* #ifdef CONFIG_PREEMPT_RCU */ 390 #define RCU_ABBR 's' 391 #define RCU_NAME_RAW "rcu_sched" 392 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 393 #ifndef CONFIG_TRACING 394 #define RCU_NAME RCU_NAME_RAW 395 #else /* #ifdef CONFIG_TRACING */ 396 static char rcu_name[] = RCU_NAME_RAW; 397 static const char *tp_rcu_varname __used __tracepoint_string = rcu_name; 398 #define RCU_NAME rcu_name 399 #endif /* #else #ifdef CONFIG_TRACING */ 400 401 /* Forward declarations for tree_plugin.h */ 402 static void rcu_bootup_announce(void); 403 static void rcu_qs(void); 404 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); 405 #ifdef CONFIG_HOTPLUG_CPU 406 static bool rcu_preempt_has_tasks(struct rcu_node *rnp); 407 #endif /* #ifdef CONFIG_HOTPLUG_CPU */ 408 static int rcu_print_task_exp_stall(struct rcu_node *rnp); 409 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 410 static void rcu_flavor_sched_clock_irq(int user); 411 static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); 412 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 413 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 414 static bool rcu_is_callbacks_kthread(void); 415 static void rcu_cpu_kthread_setup(unsigned int cpu); 416 static void __init rcu_spawn_boost_kthreads(void); 417 static void rcu_prepare_kthreads(int cpu); 418 static void rcu_cleanup_after_idle(void); 419 static void rcu_prepare_for_idle(void); 420 static bool rcu_preempt_has_tasks(struct rcu_node *rnp); 421 static bool rcu_preempt_need_deferred_qs(struct task_struct *t); 422 static void rcu_preempt_deferred_qs(struct task_struct *t); 423 static void zero_cpu_stall_ticks(struct rcu_data *rdp); 424 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp); 425 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq); 426 static void rcu_init_one_nocb(struct rcu_node *rnp); 427 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, 428 unsigned long j); 429 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, 430 bool *was_alldone, unsigned long flags); 431 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, 432 unsigned long flags); 433 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp); 434 static void do_nocb_deferred_wakeup(struct rcu_data *rdp); 435 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); 436 static void rcu_spawn_cpu_nocb_kthread(int cpu); 437 static void __init rcu_spawn_nocb_kthreads(void); 438 static void show_rcu_nocb_state(struct rcu_data *rdp); 439 static void rcu_nocb_lock(struct rcu_data *rdp); 440 static void rcu_nocb_unlock(struct rcu_data *rdp); 441 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, 442 unsigned long flags); 443 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp); 444 #ifdef CONFIG_RCU_NOCB_CPU 445 static void __init rcu_organize_nocb_kthreads(void); 446 #define rcu_nocb_lock_irqsave(rdp, flags) \ 447 do { \ 448 if (!rcu_segcblist_is_offloaded(&(rdp)->cblist)) \ 449 local_irq_save(flags); \ 450 else \ 451 raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags)); \ 452 } while (0) 453 #else /* #ifdef CONFIG_RCU_NOCB_CPU */ 454 #define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags) 455 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 456 457 static void rcu_bind_gp_kthread(void); 458 static bool rcu_nohz_full_cpu(void); 459 static void rcu_dynticks_task_enter(void); 460 static void rcu_dynticks_task_exit(void); 461 static void rcu_dynticks_task_trace_enter(void); 462 static void rcu_dynticks_task_trace_exit(void); 463 464 /* Forward declarations for tree_stall.h */ 465 static void record_gp_stall_check_time(void); 466 static void rcu_iw_handler(struct irq_work *iwp); 467 static void check_cpu_stall(struct rcu_data *rdp); 468 static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, 469 const unsigned long gpssdelay); 470