/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1	(RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	(RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3	(RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4	(RCU_FANOUT_3 * RCU_FANOUT)
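/*
 * Worked example (illustrative only; these are hypothetical values, not
 * Kconfig defaults): with NR_CPUS == 4096, RCU_FANOUT == 64, and
 * RCU_FANOUT_LEAF == 16, the products above evaluate to:
 *
 *	RCU_FANOUT_1 = 16	(CPUs covered by one leaf rcu_node)
 *	RCU_FANOUT_2 = 1024	(CPUs covered by one level-1 rcu_node)
 *	RCU_FANOUT_3 = 65536	(CPUs covered by one level-0 rcu_node)
 *
 * Because 1024 < 4096 <= 65536, the RCU_NUM_LVLS == 3 case below applies:
 *
 *	NUM_RCU_LVL_0 = 1
 *	NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 1024) = 4
 *	NUM_RCU_LVL_2 = DIV_ROUND_UP(4096, 16)   = 256
 *	NUM_RCU_NODES = 1 + 4 + 256              = 261
 */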
#if NR_CPUS <= RCU_FANOUT_1
# define RCU_NUM_LVLS	      1
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_NODES	      NUM_RCU_LVL_0
# define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
# define RCU_NODE_NAME_INIT  { "rcu_node_0" }
# define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
#elif NR_CPUS <= RCU_FANOUT_2
# define RCU_NUM_LVLS	      2
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
# define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
# define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
# define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
#elif NR_CPUS <= RCU_FANOUT_3
# define RCU_NUM_LVLS	      3
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
# define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
# define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
# define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
#elif NR_CPUS <= RCU_FANOUT_4
# define RCU_NUM_LVLS	      4
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
# define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
# define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
# define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/*  Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
					/* irq/process nesting level from idle. */
	atomic_t dynticks_idle;		/* Even value for idle, else odd. */
					/*  "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
					/* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
					/* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
					/* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
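/*
 * The ->dynticks counter above is even iff the CPU is in an extended
 * quiescent state (idle), and odd otherwise.  A minimal sketch of how a
 * snapshot can be classified and later rechecked (memory-ordering details
 * elided; the real accessors live in the .c files):
 *
 *	int snap = atomic_read(&rdtp->dynticks);
 *
 *	if (!(snap & 0x1))
 *		;	// Even: CPU is idle, hence in a quiescent state now.
 *	// ...later, any change in value means the CPU passed through an
 *	// idle period since the snapshot, which also counts:
 *	if (atomic_read(&rdtp->dynticks) != snap)
 *		;	// Quiescent state observed.
 */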
/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's ->completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
	     cpu <= rnp->grphi; \
	     cpu = cpumask_next((cpu), cpu_possible_mask))
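/*
 * A minimal usage sketch (illustrative only; "rsp" is a hypothetical
 * rcu_state pointer): scan every leaf rcu_node and test each possible
 * CPU's node-local bit, here against ->qsmaskinitnext:
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rsp, rnp) {
 *		for_each_leaf_node_possible_cpu(rnp, cpu) {
 *			if (rnp->qsmaskinitnext & leaf_node_cpu_bit(rnp, cpu))
 *				;	// CPU is online for the next GP.
 *		}
 *	}
 */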
/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	atomic_long_t exp_workdone0;	/* # done by workqueue. */
	atomic_long_t exp_workdone1;	/* # done by others #1. */
	atomic_long_t exp_workdone2;	/* # done by others #2. */
	atomic_long_t exp_workdone3;	/* # done by others #3. */
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};
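/*
 * A minimal enqueue sketch for the segmented ->nxtlist described above
 * (illustrative only; locking and the real helpers in the .c files are
 * omitted): a new callback always enters the RCU_NEXT_TAIL segment, and
 * *nxttail[RCU_NEXT_TAIL] stays NULL as the end-of-list marker:
 *
 *	static void example_enqueue(struct rcu_data *rdp, struct rcu_head *rhp)
 *	{
 *		rhp->next = NULL;
 *		*rdp->nxttail[RCU_NEXT_TAIL] = rhp;	// Link at end of list.
 *		rdp->nxttail[RCU_NEXT_TAIL] = &rhp->next; // Advance the tail.
 *	}
 */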
/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
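/*
 * A minimal usage sketch for rcu_wait() (illustrative; the flag and the
 * wakeup site are hypothetical): a kthread sleeps interruptibly until the
 * condition becomes true, re-testing after every wakeup:
 *
 *	// kthread side:
 *	rcu_wait(READ_ONCE(some_flag));
 *	// ...proceed, some_flag was observed nonzero...
 *
 *	// waker side:
 *	WRITE_ONCE(some_flag, 1);
 *	wake_up_process(the_kthread);
 */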
/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	call_rcu_func_t call;			/* call_rcu() flavor. */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 6	/* Grace-period cleanup complete. */
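/*
 * Layout sketch for struct rcu_state (illustrative, reusing the
 * hypothetical three-level configuration from the fanout example above,
 * where NUM_RCU_LVL_INIT == { 1, 4, 256 }): the dense ->node[] array
 * holds all 261 rcu_node structures, and ->level[] records where each
 * level begins:
 *
 *	->level[0] == &->node[0]	root
 *	->level[1] == &->node[1]	4 interior nodes: node[1] .. node[4]
 *	->level[2] == &->node[5]	256 leaves: node[5] .. node[260]
 *
 * This layout is what lets rcu_for_each_leaf_node() start at
 * ->level[rcu_num_lvls - 1] and walk linearly to &->node[rcu_num_nodes].
 */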
#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
#endif /* #ifndef RCU_TREE_NONCORE */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

int rcu_dynticks_snap(struct rcu_dynticks *rdtp);

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}
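/*
 * A minimal usage sketch (illustrative only): take and release a leaf
 * rcu_node's lock with interrupts saved, getting the extra ordering from
 * smp_mb__after_unlock_lock() as part of the acquire:
 *
 *	unsigned long flags;
 *	struct rcu_node *rnp = rdp->mynode;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ...update fields guarded by rnp->lock...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */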