1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update mechanism for mutual exclusion 4 * 5 * Copyright IBM Corporation, 2008 6 * 7 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 8 * Manfred Spraul <manfred@colorfullife.com> 9 * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical version 10 * 11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> 12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 13 * 14 * For detailed explanation of Read-Copy Update mechanism see - 15 * Documentation/RCU 16 */ 17 18 #define pr_fmt(fmt) "rcu: " fmt 19 20 #include <linux/types.h> 21 #include <linux/kernel.h> 22 #include <linux/init.h> 23 #include <linux/spinlock.h> 24 #include <linux/smp.h> 25 #include <linux/rcupdate_wait.h> 26 #include <linux/interrupt.h> 27 #include <linux/sched.h> 28 #include <linux/sched/debug.h> 29 #include <linux/nmi.h> 30 #include <linux/atomic.h> 31 #include <linux/bitops.h> 32 #include <linux/export.h> 33 #include <linux/completion.h> 34 #include <linux/moduleparam.h> 35 #include <linux/percpu.h> 36 #include <linux/notifier.h> 37 #include <linux/cpu.h> 38 #include <linux/mutex.h> 39 #include <linux/time.h> 40 #include <linux/kernel_stat.h> 41 #include <linux/wait.h> 42 #include <linux/kthread.h> 43 #include <uapi/linux/sched/types.h> 44 #include <linux/prefetch.h> 45 #include <linux/delay.h> 46 #include <linux/stop_machine.h> 47 #include <linux/random.h> 48 #include <linux/trace_events.h> 49 #include <linux/suspend.h> 50 #include <linux/ftrace.h> 51 #include <linux/tick.h> 52 #include <linux/sysrq.h> 53 #include <linux/kprobes.h> 54 55 #include "tree.h" 56 #include "rcu.h" 57 58 #ifdef MODULE_PARAM_PREFIX 59 #undef MODULE_PARAM_PREFIX 60 #endif 61 #define MODULE_PARAM_PREFIX "rcutree." 62 63 /* Data structures. */ 64 65 /* 66 * Steal a bit from the bottom of ->dynticks for idle entry/exit 67 * control. Initially this is for TLB flushing. 68 */ 69 #define RCU_DYNTICK_CTRL_MASK 0x1 70 #define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1) 71 #ifndef rcu_eqs_special_exit 72 #define rcu_eqs_special_exit() do { } while (0) 73 #endif 74 75 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { 76 .dynticks_nesting = 1, 77 .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, 78 .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), 79 }; 80 struct rcu_state rcu_state = { 81 .level = { &rcu_state.node[0] }, 82 .gp_state = RCU_GP_IDLE, 83 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, 84 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), 85 .name = RCU_NAME, 86 .abbr = RCU_ABBR, 87 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), 88 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), 89 .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), 90 }; 91 92 /* Dump rcu_node combining tree at boot to verify correct setup. */ 93 static bool dump_tree; 94 module_param(dump_tree, bool, 0444); 95 /* Control rcu_node-tree auto-balancing at boot time. */ 96 static bool rcu_fanout_exact; 97 module_param(rcu_fanout_exact, bool, 0444); 98 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */ 99 static int rcu_fanout_leaf = RCU_FANOUT_LEAF; 100 module_param(rcu_fanout_leaf, int, 0444); 101 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; 102 /* Number of rcu_nodes at specified level. */ 103 int num_rcu_lvl[] = NUM_RCU_LVL_INIT; 104 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. 
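 * Purely illustrative sizing example (the actual numbers depend on
 * CONFIG_RCU_FANOUT, CONFIG_RCU_FANOUT_LEAF and nr_cpu_ids): with
 * nr_cpu_ids = 256 and a leaf fanout of 16, boot-time setup builds
 * 256 / 16 = 16 leaf rcu_node structures plus one root, giving
 * rcu_num_nodes = 17 and rcu_num_lvls = 2.  Unless rcu_fanout_exact
 * is set, the per-level spread is balanced rather than packing the
 * leaves completely.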
*/ 105 106 /* 107 * The rcu_scheduler_active variable is initialized to the value 108 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the 109 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE, 110 * RCU can assume that there is but one task, allowing RCU to (for example) 111 * optimize synchronize_rcu() to a simple barrier(). When this variable 112 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required 113 * to detect real grace periods. This variable is also used to suppress 114 * boot-time false positives from lockdep-RCU error checking. Finally, it 115 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU 116 * is fully initialized, including all of its kthreads having been spawned. 117 */ 118 int rcu_scheduler_active __read_mostly; 119 EXPORT_SYMBOL_GPL(rcu_scheduler_active); 120 121 /* 122 * The rcu_scheduler_fully_active variable transitions from zero to one 123 * during the early_initcall() processing, which is after the scheduler 124 * is capable of creating new tasks. So RCU processing (for example, 125 * creating tasks for RCU priority boosting) must be delayed until after 126 * rcu_scheduler_fully_active transitions from zero to one. We also 127 * currently delay invocation of any RCU callbacks until after this point. 128 * 129 * It might later prove better for people registering RCU callbacks during 130 * early boot to take responsibility for these callbacks, but one step at 131 * a time. 132 */ 133 static int rcu_scheduler_fully_active __read_mostly; 134 135 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 136 unsigned long gps, unsigned long flags); 137 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); 138 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); 139 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 140 static void invoke_rcu_core(void); 141 static void invoke_rcu_callbacks(struct rcu_data *rdp); 142 static void rcu_report_exp_rdp(struct rcu_data *rdp); 143 static void sync_sched_exp_online_cleanup(int cpu); 144 145 /* rcuc/rcub kthread realtime priority */ 146 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0; 147 module_param(kthread_prio, int, 0444); 148 149 /* Delay in jiffies for grace-period initialization delays, debug only. */ 150 151 static int gp_preinit_delay; 152 module_param(gp_preinit_delay, int, 0444); 153 static int gp_init_delay; 154 module_param(gp_init_delay, int, 0444); 155 static int gp_cleanup_delay; 156 module_param(gp_cleanup_delay, int, 0444); 157 158 /* Retrieve RCU kthreads priority for rcutorture */ 159 int rcu_get_gp_kthreads_prio(void) 160 { 161 return kthread_prio; 162 } 163 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); 164 165 /* 166 * Number of grace periods between delays, normalized by the duration of 167 * the delay. The longer the delay, the more the grace periods between 168 * each delay. The reason for this normalization is that it means that, 169 * for non-zero delays, the overall slowdown of grace periods is constant 170 * regardless of the duration of the delay. This arrangement balances 171 * the need for long delays to increase some race probabilities with the 172 * need for fast grace periods to increase other race probabilities. 173 */ 174 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ 175 176 /* 177 * Compute the mask of online CPUs for the specified rcu_node structure. 
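 * Each bit in the returned mask corresponds to one CPU covered by this
 * rcu_node structure; a caller typically tests its own CPU's bit via
 * rdp->grpmask, as rcu_lockdep_current_cpu_online() does below.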
178 * This will not be stable unless the rcu_node structure's ->lock is 179 * held, but the bit corresponding to the current CPU will be stable 180 * in most contexts. 181 */ 182 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 183 { 184 return READ_ONCE(rnp->qsmaskinitnext); 185 } 186 187 /* 188 * Return true if an RCU grace period is in progress. The READ_ONCE()s 189 * permit this function to be invoked without holding the root rcu_node 190 * structure's ->lock, but of course results can be subject to change. 191 */ 192 static int rcu_gp_in_progress(void) 193 { 194 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); 195 } 196 197 /* 198 * Return the number of callbacks queued on the specified CPU. 199 * Handles both the nocbs and normal cases. 200 */ 201 static long rcu_get_n_cbs_cpu(int cpu) 202 { 203 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 204 205 if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */ 206 return rcu_segcblist_n_cbs(&rdp->cblist); 207 return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */ 208 } 209 210 void rcu_softirq_qs(void) 211 { 212 rcu_qs(); 213 rcu_preempt_deferred_qs(current); 214 } 215 216 /* 217 * Record entry into an extended quiescent state. This is only to be 218 * called when not already in an extended quiescent state. 219 */ 220 static void rcu_dynticks_eqs_enter(void) 221 { 222 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 223 int seq; 224 225 /* 226 * CPUs seeing atomic_add_return() must see prior RCU read-side 227 * critical sections, and we also must force ordering with the 228 * next idle sojourn. 229 */ 230 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 231 /* Better be in an extended quiescent state! */ 232 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 233 (seq & RCU_DYNTICK_CTRL_CTR)); 234 /* Better not have special action (TLB flush) pending! */ 235 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 236 (seq & RCU_DYNTICK_CTRL_MASK)); 237 } 238 239 /* 240 * Record exit from an extended quiescent state. This is only to be 241 * called from an extended quiescent state. 242 */ 243 static void rcu_dynticks_eqs_exit(void) 244 { 245 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 246 int seq; 247 248 /* 249 * CPUs seeing atomic_add_return() must see prior idle sojourns, 250 * and we also must force ordering with the next RCU read-side 251 * critical section. 252 */ 253 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 254 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 255 !(seq & RCU_DYNTICK_CTRL_CTR)); 256 if (seq & RCU_DYNTICK_CTRL_MASK) { 257 atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); 258 smp_mb__after_atomic(); /* _exit after clearing mask. */ 259 /* Prefer duplicate flushes to losing a flush. */ 260 rcu_eqs_special_exit(); 261 } 262 } 263 264 /* 265 * Reset the current CPU's ->dynticks counter to indicate that the 266 * newly onlined CPU is no longer in an extended quiescent state. 267 * This will either leave the counter unchanged, or increment it 268 * to the next non-quiescent value. 269 * 270 * The non-atomic test/increment sequence works because the upper bits 271 * of the ->dynticks counter are manipulated only by the corresponding CPU, 272 * or when the corresponding CPU is offline. 
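 * For illustration, using the definitions near the top of this file:
 * ->dynticks starts life at RCU_DYNTICK_CTRL_CTR (0x2), meaning
 * non-idle.  Each EQS entry or exit adds 0x2, so the value steps
 * 0x2 -> 0x4 -> 0x6 -> ..., and a clear RCU_DYNTICK_CTRL_CTR bit means
 * the CPU is currently in an extended quiescent state.  Bit 0x1
 * (RCU_DYNTICK_CTRL_MASK) is the separate "do something special on the
 * next EQS exit" request bit set by rcu_eqs_special_set().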
273 */ 274 static void rcu_dynticks_eqs_online(void) 275 { 276 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 277 278 if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR) 279 return; 280 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 281 } 282 283 /* 284 * Is the current CPU in an extended quiescent state? 285 * 286 * No ordering, as we are sampling CPU-local information. 287 */ 288 bool rcu_dynticks_curr_cpu_in_eqs(void) 289 { 290 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 291 292 return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); 293 } 294 295 /* 296 * Snapshot the ->dynticks counter with full ordering so as to allow 297 * stable comparison of this counter with past and future snapshots. 298 */ 299 int rcu_dynticks_snap(struct rcu_data *rdp) 300 { 301 int snap = atomic_add_return(0, &rdp->dynticks); 302 303 return snap & ~RCU_DYNTICK_CTRL_MASK; 304 } 305 306 /* 307 * Return true if the snapshot returned from rcu_dynticks_snap() 308 * indicates that RCU is in an extended quiescent state. 309 */ 310 static bool rcu_dynticks_in_eqs(int snap) 311 { 312 return !(snap & RCU_DYNTICK_CTRL_CTR); 313 } 314 315 /* 316 * Return true if the CPU corresponding to the specified rcu_data 317 * structure has spent some time in an extended quiescent state since 318 * rcu_dynticks_snap() returned the specified snapshot. 319 */ 320 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) 321 { 322 return snap != rcu_dynticks_snap(rdp); 323 } 324 325 /* 326 * Set the special (bottom) bit of the specified CPU so that it 327 * will take special action (such as flushing its TLB) on the 328 * next exit from an extended quiescent state. Returns true if 329 * the bit was successfully set, or false if the CPU was not in 330 * an extended quiescent state. 331 */ 332 bool rcu_eqs_special_set(int cpu) 333 { 334 int old; 335 int new; 336 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 337 338 do { 339 old = atomic_read(&rdp->dynticks); 340 if (old & RCU_DYNTICK_CTRL_CTR) 341 return false; 342 new = old | RCU_DYNTICK_CTRL_MASK; 343 } while (atomic_cmpxchg(&rdp->dynticks, old, new) != old); 344 return true; 345 } 346 347 /* 348 * Let the RCU core know that this CPU has gone through the scheduler, 349 * which is a quiescent state. This is called when the need for a 350 * quiescent state is urgent, so we burn an atomic operation and full 351 * memory barriers to let the RCU core know about it, regardless of what 352 * this CPU might (or might not) do in the near future. 353 * 354 * We inform the RCU core by emulating a zero-duration dyntick-idle period. 355 * 356 * The caller must have disabled interrupts and must not be idle. 357 */ 358 static void __maybe_unused rcu_momentary_dyntick_idle(void) 359 { 360 int special; 361 362 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); 363 special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, 364 &this_cpu_ptr(&rcu_data)->dynticks); 365 /* It is illegal to call this from idle state. */ 366 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); 367 rcu_preempt_deferred_qs(current); 368 } 369 370 /** 371 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle 372 * 373 * If the current CPU is idle or running at a first-level (not nested) 374 * interrupt from idle, return true. The caller must have at least 375 * disabled preemption. 
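 * For illustration, based on the nesting rules used elsewhere in this
 * file: while idle, ->dynticks_nesting is 0 and ->dynticks_nmi_nesting
 * is 0; the first interrupt taken from idle raises
 * ->dynticks_nmi_nesting to 1, and each nested interrupt or NMI adds 2
 * (giving 3, 5, ...).  The check below therefore succeeds only in the
 * idle task itself or in an un-nested interrupt taken directly from
 * idle.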
376 */ 377 static int rcu_is_cpu_rrupt_from_idle(void) 378 { 379 return __this_cpu_read(rcu_data.dynticks_nesting) <= 0 && 380 __this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 1; 381 } 382 383 #define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch. */ 384 static long blimit = DEFAULT_RCU_BLIMIT; 385 #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */ 386 static long qhimark = DEFAULT_RCU_QHIMARK; 387 #define DEFAULT_RCU_QLOMARK 100 /* Once only this many pending, use blimit. */ 388 static long qlowmark = DEFAULT_RCU_QLOMARK; 389 390 module_param(blimit, long, 0444); 391 module_param(qhimark, long, 0444); 392 module_param(qlowmark, long, 0444); 393 394 static ulong jiffies_till_first_fqs = ULONG_MAX; 395 static ulong jiffies_till_next_fqs = ULONG_MAX; 396 static bool rcu_kick_kthreads; 397 398 /* 399 * How long the grace period must be before we start recruiting 400 * quiescent-state help from rcu_note_context_switch(). 401 */ 402 static ulong jiffies_till_sched_qs = ULONG_MAX; 403 module_param(jiffies_till_sched_qs, ulong, 0444); 404 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */ 405 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */ 406 407 /* 408 * Make sure that we give the grace-period kthread time to detect any 409 * idle CPUs before taking active measures to force quiescent states. 410 * However, don't go below 100 milliseconds, adjusted upwards for really 411 * large systems. 412 */ 413 static void adjust_jiffies_till_sched_qs(void) 414 { 415 unsigned long j; 416 417 /* If jiffies_till_sched_qs was specified, respect the request. */ 418 if (jiffies_till_sched_qs != ULONG_MAX) { 419 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); 420 return; 421 } 422 /* Otherwise, set to third fqs scan, but bound below on large system. */ 423 j = READ_ONCE(jiffies_till_first_fqs) + 424 2 * READ_ONCE(jiffies_till_next_fqs); 425 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) 426 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 427 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); 428 WRITE_ONCE(jiffies_to_sched_qs, j); 429 } 430 431 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp) 432 { 433 ulong j; 434 int ret = kstrtoul(val, 0, &j); 435 436 if (!ret) { 437 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); 438 adjust_jiffies_till_sched_qs(); 439 } 440 return ret; 441 } 442 443 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp) 444 { 445 ulong j; 446 int ret = kstrtoul(val, 0, &j); 447 448 if (!ret) { 449 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); 450 adjust_jiffies_till_sched_qs(); 451 } 452 return ret; 453 } 454 455 static struct kernel_param_ops first_fqs_jiffies_ops = { 456 .set = param_set_first_fqs_jiffies, 457 .get = param_get_ulong, 458 }; 459 460 static struct kernel_param_ops next_fqs_jiffies_ops = { 461 .set = param_set_next_fqs_jiffies, 462 .get = param_get_ulong, 463 }; 464 465 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644); 466 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644); 467 module_param(rcu_kick_kthreads, bool, 0644); 468 469 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); 470 static int rcu_pending(void); 471 472 /* 473 * Return the number of RCU GPs completed thus far for debug & stats. 
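 * Note that ->gp_seq is not a plain count of grace periods: its
 * low-order bits (RCU_SEQ_STATE_MASK, defined in rcu.h) encode the
 * phase of the current grace period, and the remaining bits starting
 * at RCU_SEQ_CTR_SHIFT form the counter proper.  Two values differing
 * only in those low-order state bits therefore refer to the same grace
 * period, which is why comparisons elsewhere go through the rcu_seq_*()
 * helpers rather than plain arithmetic.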
474 */ 475 unsigned long rcu_get_gp_seq(void) 476 { 477 return READ_ONCE(rcu_state.gp_seq); 478 } 479 EXPORT_SYMBOL_GPL(rcu_get_gp_seq); 480 481 /* 482 * Return the number of RCU expedited batches completed thus far for 483 * debug & stats. Odd numbers mean that a batch is in progress, even 484 * numbers mean idle. The value returned will thus be roughly double 485 * the cumulative batches since boot. 486 */ 487 unsigned long rcu_exp_batches_completed(void) 488 { 489 return rcu_state.expedited_sequence; 490 } 491 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); 492 493 /* 494 * Return the root node of the rcu_state structure. 495 */ 496 static struct rcu_node *rcu_get_root(void) 497 { 498 return &rcu_state.node[0]; 499 } 500 501 /* 502 * Convert a ->gp_state value to a character string. 503 */ 504 static const char *gp_state_getname(short gs) 505 { 506 if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names)) 507 return "???"; 508 return gp_state_names[gs]; 509 } 510 511 /* 512 * Send along grace-period-related data for rcutorture diagnostics. 513 */ 514 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 515 unsigned long *gp_seq) 516 { 517 switch (test_type) { 518 case RCU_FLAVOR: 519 *flags = READ_ONCE(rcu_state.gp_flags); 520 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); 521 break; 522 default: 523 break; 524 } 525 } 526 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 527 528 /* 529 * Enter an RCU extended quiescent state, which can be either the 530 * idle loop or adaptive-tickless usermode execution. 531 * 532 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for 533 * the possibility of usermode upcalls having messed up our count 534 * of interrupt nesting level during the prior busy period. 535 */ 536 static void rcu_eqs_enter(bool user) 537 { 538 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 539 540 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); 541 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); 542 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 543 rdp->dynticks_nesting == 0); 544 if (rdp->dynticks_nesting != 1) { 545 rdp->dynticks_nesting--; 546 return; 547 } 548 549 lockdep_assert_irqs_disabled(); 550 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); 551 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 552 rdp = this_cpu_ptr(&rcu_data); 553 do_nocb_deferred_wakeup(rdp); 554 rcu_prepare_for_idle(); 555 rcu_preempt_deferred_qs(current); 556 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ 557 rcu_dynticks_eqs_enter(); 558 rcu_dynticks_task_enter(); 559 } 560 561 /** 562 * rcu_idle_enter - inform RCU that current CPU is entering idle 563 * 564 * Enter idle mode, in other words, -leave- the mode in which RCU 565 * read-side critical sections can occur. (Though RCU read-side 566 * critical sections can occur in irq handlers in idle, a possibility 567 * handled by irq_enter() and irq_exit().) 568 * 569 * If you add or remove a call to rcu_idle_enter(), be sure to test with 570 * CONFIG_RCU_EQS_DEBUG=y. 571 */ 572 void rcu_idle_enter(void) 573 { 574 lockdep_assert_irqs_disabled(); 575 rcu_eqs_enter(false); 576 } 577 578 #ifdef CONFIG_NO_HZ_FULL 579 /** 580 * rcu_user_enter - inform RCU that we are resuming userspace. 581 * 582 * Enter RCU idle mode right before resuming userspace. No use of RCU 583 * is permitted between this call and rcu_user_exit(). This way the 584 * CPU doesn't need to maintain the tick for RCU maintenance purposes 585 * when the CPU runs in userspace. 
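 * Illustrative call chain only (the exact entry points live in the
 * context-tracking code, not in this file): the return-to-user path
 * ends up doing roughly
 *
 *	user_enter() -> __context_tracking_enter(CONTEXT_USER)
 *		-> rcu_user_enter();
 *
 * with interrupts disabled, and the matching rcu_user_exit() runs on
 * the next entry back into the kernel.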
586 * 587 * If you add or remove a call to rcu_user_enter(), be sure to test with 588 * CONFIG_RCU_EQS_DEBUG=y. 589 */ 590 void rcu_user_enter(void) 591 { 592 lockdep_assert_irqs_disabled(); 593 rcu_eqs_enter(true); 594 } 595 #endif /* CONFIG_NO_HZ_FULL */ 596 597 /* 598 * If we are returning from the outermost NMI handler that interrupted an 599 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting 600 * to let the RCU grace-period handling know that the CPU is back to 601 * being RCU-idle. 602 * 603 * If you add or remove a call to rcu_nmi_exit_common(), be sure to test 604 * with CONFIG_RCU_EQS_DEBUG=y. 605 */ 606 static __always_inline void rcu_nmi_exit_common(bool irq) 607 { 608 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 609 610 /* 611 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. 612 * (We are exiting an NMI handler, so RCU better be paying attention 613 * to us!) 614 */ 615 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); 616 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()); 617 618 /* 619 * If the nesting level is not 1, the CPU wasn't RCU-idle, so 620 * leave it in non-RCU-idle state. 621 */ 622 if (rdp->dynticks_nmi_nesting != 1) { 623 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks); 624 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ 625 rdp->dynticks_nmi_nesting - 2); 626 return; 627 } 628 629 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ 630 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks); 631 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ 632 633 if (irq) 634 rcu_prepare_for_idle(); 635 636 rcu_dynticks_eqs_enter(); 637 638 if (irq) 639 rcu_dynticks_task_enter(); 640 } 641 642 /** 643 * rcu_nmi_exit - inform RCU of exit from NMI context 644 * 645 * If you add or remove a call to rcu_nmi_exit(), be sure to test 646 * with CONFIG_RCU_EQS_DEBUG=y. 647 */ 648 void rcu_nmi_exit(void) 649 { 650 rcu_nmi_exit_common(false); 651 } 652 653 /** 654 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle 655 * 656 * Exit from an interrupt handler, which might possibly result in entering 657 * idle mode, in other words, leaving the mode in which read-side critical 658 * sections can occur. The caller must have disabled interrupts. 659 * 660 * This code assumes that the idle loop never does anything that might 661 * result in unbalanced calls to irq_enter() and irq_exit(). If your 662 * architecture's idle loop violates this assumption, RCU will give you what 663 * you deserve, good and hard. But very infrequently and irreproducibly. 664 * 665 * Use things like work queues to work around this limitation. 666 * 667 * You have been warned. 668 * 669 * If you add or remove a call to rcu_irq_exit(), be sure to test with 670 * CONFIG_RCU_EQS_DEBUG=y. 671 */ 672 void rcu_irq_exit(void) 673 { 674 lockdep_assert_irqs_disabled(); 675 rcu_nmi_exit_common(true); 676 } 677 678 /* 679 * Wrapper for rcu_irq_exit() where interrupts are enabled. 680 * 681 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test 682 * with CONFIG_RCU_EQS_DEBUG=y. 683 */ 684 void rcu_irq_exit_irqson(void) 685 { 686 unsigned long flags; 687 688 local_irq_save(flags); 689 rcu_irq_exit(); 690 local_irq_restore(flags); 691 } 692 693 /* 694 * Exit an RCU extended quiescent state, which can be either the 695 * idle loop or adaptive-tickless usermode execution. 
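 * This is the mirror image of rcu_eqs_enter(): for the outermost exit,
 * ->dynticks_nesting goes back from 0 to 1 (nested exits just increment
 * it and return), and ->dynticks is advanced so that its
 * RCU_DYNTICK_CTRL_CTR bit is set again, marking the CPU non-idle.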
696 * 697 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to 698 * allow for the possibility of usermode upcalls messing up our count of 699 * interrupt nesting level during the busy period that is just now starting. 700 */ 701 static void rcu_eqs_exit(bool user) 702 { 703 struct rcu_data *rdp; 704 long oldval; 705 706 lockdep_assert_irqs_disabled(); 707 rdp = this_cpu_ptr(&rcu_data); 708 oldval = rdp->dynticks_nesting; 709 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); 710 if (oldval) { 711 rdp->dynticks_nesting++; 712 return; 713 } 714 rcu_dynticks_task_exit(); 715 rcu_dynticks_eqs_exit(); 716 rcu_cleanup_after_idle(); 717 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); 718 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 719 WRITE_ONCE(rdp->dynticks_nesting, 1); 720 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); 721 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); 722 } 723 724 /** 725 * rcu_idle_exit - inform RCU that current CPU is leaving idle 726 * 727 * Exit idle mode, in other words, -enter- the mode in which RCU 728 * read-side critical sections can occur. 729 * 730 * If you add or remove a call to rcu_idle_exit(), be sure to test with 731 * CONFIG_RCU_EQS_DEBUG=y. 732 */ 733 void rcu_idle_exit(void) 734 { 735 unsigned long flags; 736 737 local_irq_save(flags); 738 rcu_eqs_exit(false); 739 local_irq_restore(flags); 740 } 741 742 #ifdef CONFIG_NO_HZ_FULL 743 /** 744 * rcu_user_exit - inform RCU that we are exiting userspace. 745 * 746 * Exit RCU idle mode while entering the kernel because it can 747 * run a RCU read side critical section anytime. 748 * 749 * If you add or remove a call to rcu_user_exit(), be sure to test with 750 * CONFIG_RCU_EQS_DEBUG=y. 751 */ 752 void rcu_user_exit(void) 753 { 754 rcu_eqs_exit(1); 755 } 756 #endif /* CONFIG_NO_HZ_FULL */ 757 758 /** 759 * rcu_nmi_enter_common - inform RCU of entry to NMI context 760 * @irq: Is this call from rcu_irq_enter? 761 * 762 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and 763 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know 764 * that the CPU is active. This implementation permits nested NMIs, as 765 * long as the nesting level does not overflow an int. (You will probably 766 * run out of stack space first.) 767 * 768 * If you add or remove a call to rcu_nmi_enter_common(), be sure to test 769 * with CONFIG_RCU_EQS_DEBUG=y. 770 */ 771 static __always_inline void rcu_nmi_enter_common(bool irq) 772 { 773 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 774 long incby = 2; 775 776 /* Complain about underflow. */ 777 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); 778 779 /* 780 * If idle from RCU viewpoint, atomically increment ->dynticks 781 * to mark non-idle and increment ->dynticks_nmi_nesting by one. 782 * Otherwise, increment ->dynticks_nmi_nesting by two. This means 783 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed 784 * to be in the outermost NMI handler that interrupted an RCU-idle 785 * period (observation due to Andy Lutomirski). 786 */ 787 if (rcu_dynticks_curr_cpu_in_eqs()) { 788 789 if (irq) 790 rcu_dynticks_task_exit(); 791 792 rcu_dynticks_eqs_exit(); 793 794 if (irq) 795 rcu_cleanup_after_idle(); 796 797 incby = 1; 798 } 799 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), 800 rdp->dynticks_nmi_nesting, 801 rdp->dynticks_nmi_nesting + incby, rdp->dynticks); 802 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. 
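 * WRITE_ONCE() keeps the compiler from splitting this update into
 * several narrower stores; an NMI arriving mid-update must observe
 * either the old or the new ->dynticks_nmi_nesting value, never a
 * half-written one.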
*/ 803 rdp->dynticks_nmi_nesting + incby); 804 barrier(); 805 } 806 807 /** 808 * rcu_nmi_enter - inform RCU of entry to NMI context 809 */ 810 void rcu_nmi_enter(void) 811 { 812 rcu_nmi_enter_common(false); 813 } 814 NOKPROBE_SYMBOL(rcu_nmi_enter); 815 816 /** 817 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle 818 * 819 * Enter an interrupt handler, which might possibly result in exiting 820 * idle mode, in other words, entering the mode in which read-side critical 821 * sections can occur. The caller must have disabled interrupts. 822 * 823 * Note that the Linux kernel is fully capable of entering an interrupt 824 * handler that it never exits, for example when doing upcalls to user mode! 825 * This code assumes that the idle loop never does upcalls to user mode. 826 * If your architecture's idle loop does do upcalls to user mode (or does 827 * anything else that results in unbalanced calls to the irq_enter() and 828 * irq_exit() functions), RCU will give you what you deserve, good and hard. 829 * But very infrequently and irreproducibly. 830 * 831 * Use things like work queues to work around this limitation. 832 * 833 * You have been warned. 834 * 835 * If you add or remove a call to rcu_irq_enter(), be sure to test with 836 * CONFIG_RCU_EQS_DEBUG=y. 837 */ 838 void rcu_irq_enter(void) 839 { 840 lockdep_assert_irqs_disabled(); 841 rcu_nmi_enter_common(true); 842 } 843 844 /* 845 * Wrapper for rcu_irq_enter() where interrupts are enabled. 846 * 847 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test 848 * with CONFIG_RCU_EQS_DEBUG=y. 849 */ 850 void rcu_irq_enter_irqson(void) 851 { 852 unsigned long flags; 853 854 local_irq_save(flags); 855 rcu_irq_enter(); 856 local_irq_restore(flags); 857 } 858 859 /** 860 * rcu_is_watching - see if RCU thinks that the current CPU is not idle 861 * 862 * Return true if RCU is watching the running CPU, which means that this 863 * CPU can safely enter RCU read-side critical sections. In other words, 864 * if the current CPU is not in its idle loop or is in an interrupt or 865 * NMI handler, return true. 866 */ 867 bool notrace rcu_is_watching(void) 868 { 869 bool ret; 870 871 preempt_disable_notrace(); 872 ret = !rcu_dynticks_curr_cpu_in_eqs(); 873 preempt_enable_notrace(); 874 return ret; 875 } 876 EXPORT_SYMBOL_GPL(rcu_is_watching); 877 878 /* 879 * If a holdout task is actually running, request an urgent quiescent 880 * state from its CPU. This is unsynchronized, so migrations can cause 881 * the request to go to the wrong CPU. Which is OK, all that will happen 882 * is that the CPU's next context switch will be a bit slower and next 883 * time around this task will generate another request. 884 */ 885 void rcu_request_urgent_qs_task(struct task_struct *t) 886 { 887 int cpu; 888 889 barrier(); 890 cpu = task_cpu(t); 891 if (!task_curr(t)) 892 return; /* This task is not running on that CPU. */ 893 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); 894 } 895 896 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 897 898 /* 899 * Is the current CPU online as far as RCU is concerned? 900 * 901 * Disable preemption to avoid false positives that could otherwise 902 * happen due to the current CPU number being sampled, this task being 903 * preempted, its old CPU being taken offline, resuming on some other CPU, 904 * then determining that its old CPU is now offline. 
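 * Concretely: the task samples its CPU as, say, 3, is preempted, CPU 3
 * is then taken offline, and the task resumes on CPU 5; without the
 * preempt_disable() below, the ->qsmaskinitnext test could be made
 * against stale CPU 3 and wrongly report the current CPU as offline.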
905 * 906 * Disable checking if in an NMI handler because we cannot safely 907 * report errors from NMI handlers anyway. In addition, it is OK to use 908 * RCU on an offline processor during initial boot, hence the check for 909 * rcu_scheduler_fully_active. 910 */ 911 bool rcu_lockdep_current_cpu_online(void) 912 { 913 struct rcu_data *rdp; 914 struct rcu_node *rnp; 915 bool ret = false; 916 917 if (in_nmi() || !rcu_scheduler_fully_active) 918 return true; 919 preempt_disable(); 920 rdp = this_cpu_ptr(&rcu_data); 921 rnp = rdp->mynode; 922 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) 923 ret = true; 924 preempt_enable(); 925 return ret; 926 } 927 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 928 929 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 930 931 /* 932 * We are reporting a quiescent state on behalf of some other CPU, so 933 * it is our responsibility to check for and handle potential overflow 934 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters. 935 * After all, the CPU might be in deep idle state, and thus executing no 936 * code whatsoever. 937 */ 938 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) 939 { 940 raw_lockdep_assert_held_rcu_node(rnp); 941 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, 942 rnp->gp_seq)) 943 WRITE_ONCE(rdp->gpwrap, true); 944 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) 945 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; 946 } 947 948 /* 949 * Snapshot the specified CPU's dynticks counter so that we can later 950 * credit them with an implicit quiescent state. Return 1 if this CPU 951 * is in dynticks idle mode, which is an extended quiescent state. 952 */ 953 static int dyntick_save_progress_counter(struct rcu_data *rdp) 954 { 955 rdp->dynticks_snap = rcu_dynticks_snap(rdp); 956 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { 957 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 958 rcu_gpnum_ovf(rdp->mynode, rdp); 959 return 1; 960 } 961 return 0; 962 } 963 964 /* 965 * Return true if the specified CPU has passed through a quiescent 966 * state by virtue of being in or having passed through an dynticks 967 * idle state since the last call to dyntick_save_progress_counter() 968 * for this same CPU, or by virtue of having been offline. 969 */ 970 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) 971 { 972 unsigned long jtsq; 973 bool *rnhqp; 974 bool *ruqp; 975 struct rcu_node *rnp = rdp->mynode; 976 977 /* 978 * If the CPU passed through or entered a dynticks idle phase with 979 * no active irq/NMI handlers, then we can safely pretend that the CPU 980 * already acknowledged the request to pass through a quiescent 981 * state. Either way, that CPU cannot possibly be in an RCU 982 * read-side critical section that started before the beginning 983 * of the current RCU grace period. 984 */ 985 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { 986 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 987 rcu_gpnum_ovf(rnp, rdp); 988 return 1; 989 } 990 991 /* If waiting too long on an offline CPU, complain. */ 992 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && 993 time_after(jiffies, rcu_state.gp_start + HZ)) { 994 bool onl; 995 struct rcu_node *rnp1; 996 997 WARN_ON(1); /* Offline CPUs are supposed to report QS! 
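 * The CPU-hotplug offlining path is expected to have reported a
 * quiescent state on this CPU's behalf, so if the FQS scan still sees
 * the CPU blocking the grace period, the hotplug/grace-period handshake
 * has gone wrong; the pr_info() calls below dump the relevant rcu_node
 * state to help debug it.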
*/ 998 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", 999 __func__, rnp->grplo, rnp->grphi, rnp->level, 1000 (long)rnp->gp_seq, (long)rnp->completedqs); 1001 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) 1002 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n", 1003 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask); 1004 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); 1005 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n", 1006 __func__, rdp->cpu, ".o"[onl], 1007 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, 1008 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); 1009 return 1; /* Break things loose after complaining. */ 1010 } 1011 1012 /* 1013 * A CPU running for an extended time within the kernel can 1014 * delay RCU grace periods: (1) At age jiffies_to_sched_qs, 1015 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set 1016 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the 1017 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs 1018 * variable are safe because the assignments are repeated if this 1019 * CPU failed to pass through a quiescent state. This code 1020 * also checks .jiffies_resched in case jiffies_to_sched_qs 1021 * is set way high. 1022 */ 1023 jtsq = READ_ONCE(jiffies_to_sched_qs); 1024 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); 1025 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); 1026 if (!READ_ONCE(*rnhqp) && 1027 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || 1028 time_after(jiffies, rcu_state.jiffies_resched))) { 1029 WRITE_ONCE(*rnhqp, true); 1030 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ 1031 smp_store_release(ruqp, true); 1032 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { 1033 WRITE_ONCE(*ruqp, true); 1034 } 1035 1036 /* 1037 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! 1038 * The above code handles this, but only for straight cond_resched(). 1039 * And some in-kernel loops check need_resched() before calling 1040 * cond_resched(), which defeats the above code for CPUs that are 1041 * running in-kernel with scheduling-clock interrupts disabled. 1042 * So hit them over the head with the resched_cpu() hammer! 1043 */ 1044 if (tick_nohz_full_cpu(rdp->cpu) && 1045 time_after(jiffies, 1046 READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) { 1047 resched_cpu(rdp->cpu); 1048 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1049 } 1050 1051 /* 1052 * If more than halfway to RCU CPU stall-warning time, invoke 1053 * resched_cpu() more frequently to try to loosen things up a bit. 1054 * Also check to see if the CPU is getting hammered with interrupts, 1055 * but only once per grace period, just to keep the IPIs down to 1056 * a dull roar. 1057 */ 1058 if (time_after(jiffies, rcu_state.jiffies_resched)) { 1059 if (time_after(jiffies, 1060 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { 1061 resched_cpu(rdp->cpu); 1062 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1063 } 1064 if (IS_ENABLED(CONFIG_IRQ_WORK) && 1065 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && 1066 (rnp->ffmask & rdp->grpmask)) { 1067 init_irq_work(&rdp->rcu_iw, rcu_iw_handler); 1068 rdp->rcu_iw_pending = true; 1069 rdp->rcu_iw_gp_seq = rnp->gp_seq; 1070 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); 1071 } 1072 } 1073 1074 return 0; 1075 } 1076 1077 /* Trace-event wrapper function for trace_rcu_future_grace_period. 
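 * It fills in the rcu_state and rcu_node context (grace-period number,
 * level and CPU range) so that the funnel-locking code below need only
 * supply the requested gp_seq and a short tag string.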
*/ 1078 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, 1079 unsigned long gp_seq_req, const char *s) 1080 { 1081 trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req, 1082 rnp->level, rnp->grplo, rnp->grphi, s); 1083 } 1084 1085 /* 1086 * rcu_start_this_gp - Request the start of a particular grace period 1087 * @rnp_start: The leaf node of the CPU from which to start. 1088 * @rdp: The rcu_data corresponding to the CPU from which to start. 1089 * @gp_seq_req: The gp_seq of the grace period to start. 1090 * 1091 * Start the specified grace period, as needed to handle newly arrived 1092 * callbacks. The required future grace periods are recorded in each 1093 * rcu_node structure's ->gp_seq_needed field. Returns true if there 1094 * is reason to awaken the grace-period kthread. 1095 * 1096 * The caller must hold the specified rcu_node structure's ->lock, which 1097 * is why the caller is responsible for waking the grace-period kthread. 1098 * 1099 * Returns true if the GP thread needs to be awakened else false. 1100 */ 1101 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, 1102 unsigned long gp_seq_req) 1103 { 1104 bool ret = false; 1105 struct rcu_node *rnp; 1106 1107 /* 1108 * Use funnel locking to either acquire the root rcu_node 1109 * structure's lock or bail out if the need for this grace period 1110 * has already been recorded -- or if that grace period has in 1111 * fact already started. If there is already a grace period in 1112 * progress in a non-leaf node, no recording is needed because the 1113 * end of the grace period will scan the leaf rcu_node structures. 1114 * Note that rnp_start->lock must not be released. 1115 */ 1116 raw_lockdep_assert_held_rcu_node(rnp_start); 1117 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); 1118 for (rnp = rnp_start; 1; rnp = rnp->parent) { 1119 if (rnp != rnp_start) 1120 raw_spin_lock_rcu_node(rnp); 1121 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || 1122 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || 1123 (rnp != rnp_start && 1124 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { 1125 trace_rcu_this_gp(rnp, rdp, gp_seq_req, 1126 TPS("Prestarted")); 1127 goto unlock_out; 1128 } 1129 rnp->gp_seq_needed = gp_seq_req; 1130 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { 1131 /* 1132 * We just marked the leaf or internal node, and a 1133 * grace period is in progress, which means that 1134 * rcu_gp_cleanup() will see the marking. Bail to 1135 * reduce contention. 1136 */ 1137 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, 1138 TPS("Startedleaf")); 1139 goto unlock_out; 1140 } 1141 if (rnp != rnp_start && rnp->parent != NULL) 1142 raw_spin_unlock_rcu_node(rnp); 1143 if (!rnp->parent) 1144 break; /* At root, and perhaps also leaf. */ 1145 } 1146 1147 /* If GP already in progress, just leave, otherwise start one. */ 1148 if (rcu_gp_in_progress()) { 1149 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); 1150 goto unlock_out; 1151 } 1152 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); 1153 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT); 1154 rcu_state.gp_req_activity = jiffies; 1155 if (!rcu_state.gp_kthread) { 1156 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); 1157 goto unlock_out; 1158 } 1159 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq")); 1160 ret = true; /* Caller must wake GP kthread. 
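 * The wakeup must wait until the rcu_node locks are dropped, so callers
 * follow roughly the pattern used by rcu_accelerate_cbs_unlocked():
 *
 *	needwake = rcu_accelerate_cbs(rnp, rdp);
 *	raw_spin_unlock_rcu_node(rnp);
 *	if (needwake)
 *		rcu_gp_kthread_wake();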
*/ 1161 unlock_out: 1162 /* Push furthest requested GP to leaf node and rcu_data structure. */ 1163 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { 1164 rnp_start->gp_seq_needed = rnp->gp_seq_needed; 1165 rdp->gp_seq_needed = rnp->gp_seq_needed; 1166 } 1167 if (rnp != rnp_start) 1168 raw_spin_unlock_rcu_node(rnp); 1169 return ret; 1170 } 1171 1172 /* 1173 * Clean up any old requests for the just-ended grace period. Also return 1174 * whether any additional grace periods have been requested. 1175 */ 1176 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) 1177 { 1178 bool needmore; 1179 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1180 1181 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); 1182 if (!needmore) 1183 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ 1184 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, 1185 needmore ? TPS("CleanupMore") : TPS("Cleanup")); 1186 return needmore; 1187 } 1188 1189 /* 1190 * Awaken the grace-period kthread. Don't do a self-awaken (unless in 1191 * an interrupt or softirq handler), and don't bother awakening when there 1192 * is nothing for the grace-period kthread to do (as in several CPUs raced 1193 * to awaken, and we lost), and finally don't try to awaken a kthread that 1194 * has not yet been created. If all those checks are passed, track some 1195 * debug information and awaken. 1196 * 1197 * So why do the self-wakeup when in an interrupt or softirq handler 1198 * in the grace-period kthread's context? Because the kthread might have 1199 * been interrupted just as it was going to sleep, and just after the final 1200 * pre-sleep check of the awaken condition. In this case, a wakeup really 1201 * is required, and is therefore supplied. 1202 */ 1203 static void rcu_gp_kthread_wake(void) 1204 { 1205 if ((current == rcu_state.gp_kthread && 1206 !in_irq() && !in_serving_softirq()) || 1207 !READ_ONCE(rcu_state.gp_flags) || 1208 !rcu_state.gp_kthread) 1209 return; 1210 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); 1211 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); 1212 swake_up_one(&rcu_state.gp_wq); 1213 } 1214 1215 /* 1216 * If there is room, assign a ->gp_seq number to any callbacks on this 1217 * CPU that have not already been assigned. Also accelerate any callbacks 1218 * that were previously assigned a ->gp_seq number that has since proven 1219 * to be too conservative, which can happen if callbacks get assigned a 1220 * ->gp_seq number while RCU is idle, but with reference to a non-root 1221 * rcu_node structure. This function is idempotent, so it does not hurt 1222 * to call it repeatedly. Returns an flag saying that we should awaken 1223 * the RCU grace-period kthread. 1224 * 1225 * The caller must hold rnp->lock with interrupts disabled. 1226 */ 1227 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1228 { 1229 unsigned long gp_seq_req; 1230 bool ret = false; 1231 1232 raw_lockdep_assert_held_rcu_node(rnp); 1233 1234 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1235 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1236 return false; 1237 1238 /* 1239 * Callbacks are often registered with incomplete grace-period 1240 * information. Something about the fact that getting exact 1241 * information requires acquiring a global lock... RCU therefore 1242 * makes a conservative estimate of the grace period number at which 1243 * a given callback will become ready to invoke. 
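 * (For example, a callback assigned a ->gp_seq number while RCU was
 * idle, based on a not-yet-updated non-root rcu_node structure, may be
 * tagged to wait for a later grace period than it actually needs.)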
The following 1244 * code checks this estimate and improves it when possible, thus 1245 * accelerating callback invocation to an earlier grace-period 1246 * number. 1247 */ 1248 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); 1249 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) 1250 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); 1251 1252 /* Trace depending on how much we were able to accelerate. */ 1253 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) 1254 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB")); 1255 else 1256 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB")); 1257 return ret; 1258 } 1259 1260 /* 1261 * Similar to rcu_accelerate_cbs(), but does not require that the leaf 1262 * rcu_node structure's ->lock be held. It consults the cached value 1263 * of ->gp_seq_needed in the rcu_data structure, and if that indicates 1264 * that a new grace-period request be made, invokes rcu_accelerate_cbs() 1265 * while holding the leaf rcu_node structure's ->lock. 1266 */ 1267 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, 1268 struct rcu_data *rdp) 1269 { 1270 unsigned long c; 1271 bool needwake; 1272 1273 lockdep_assert_irqs_disabled(); 1274 c = rcu_seq_snap(&rcu_state.gp_seq); 1275 if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1276 /* Old request still live, so mark recent callbacks. */ 1277 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1278 return; 1279 } 1280 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1281 needwake = rcu_accelerate_cbs(rnp, rdp); 1282 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1283 if (needwake) 1284 rcu_gp_kthread_wake(); 1285 } 1286 1287 /* 1288 * Move any callbacks whose grace period has completed to the 1289 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1290 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1291 * sublist. This function is idempotent, so it does not hurt to 1292 * invoke it repeatedly. As long as it is not invoked -too- often... 1293 * Returns true if the RCU grace-period kthread needs to be awakened. 1294 * 1295 * The caller must hold rnp->lock with interrupts disabled. 1296 */ 1297 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1298 { 1299 raw_lockdep_assert_held_rcu_node(rnp); 1300 1301 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1302 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1303 return false; 1304 1305 /* 1306 * Find all callbacks whose ->gp_seq numbers indicate that they 1307 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1308 */ 1309 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1310 1311 /* Classify any remaining callbacks. */ 1312 return rcu_accelerate_cbs(rnp, rdp); 1313 } 1314 1315 /* 1316 * Update CPU-local rcu_data state to record the beginnings and ends of 1317 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1318 * structure corresponding to the current CPU, and must have irqs disabled. 1319 * Returns true if the grace-period kthread needs to be awakened. 1320 */ 1321 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1322 { 1323 bool ret; 1324 bool need_gp; 1325 1326 raw_lockdep_assert_held_rcu_node(rnp); 1327 1328 if (rdp->gp_seq == rnp->gp_seq) 1329 return false; /* Nothing to do. */ 1330 1331 /* Handle the ends of any preceding grace periods first. 
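 * ("Handling the end" means advancing this CPU's callbacks past the
 * newly completed grace period(s) and emitting the "cpuend" tracepoint;
 * if no grace period ended for this CPU, recent callbacks are merely
 * accelerated instead.)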
*/ 1332 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1333 unlikely(READ_ONCE(rdp->gpwrap))) { 1334 ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */ 1335 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1336 } else { 1337 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */ 1338 } 1339 1340 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ 1341 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1342 unlikely(READ_ONCE(rdp->gpwrap))) { 1343 /* 1344 * If the current grace period is waiting for this CPU, 1345 * set up to detect a quiescent state, otherwise don't 1346 * go looking for one. 1347 */ 1348 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1349 need_gp = !!(rnp->qsmask & rdp->grpmask); 1350 rdp->cpu_no_qs.b.norm = need_gp; 1351 rdp->core_needs_qs = need_gp; 1352 zero_cpu_stall_ticks(rdp); 1353 } 1354 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1355 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1356 rdp->gp_seq_needed = rnp->gp_seq_needed; 1357 WRITE_ONCE(rdp->gpwrap, false); 1358 rcu_gpnum_ovf(rnp, rdp); 1359 return ret; 1360 } 1361 1362 static void note_gp_changes(struct rcu_data *rdp) 1363 { 1364 unsigned long flags; 1365 bool needwake; 1366 struct rcu_node *rnp; 1367 1368 local_irq_save(flags); 1369 rnp = rdp->mynode; 1370 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1371 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1372 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ 1373 local_irq_restore(flags); 1374 return; 1375 } 1376 needwake = __note_gp_changes(rnp, rdp); 1377 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1378 if (needwake) 1379 rcu_gp_kthread_wake(); 1380 } 1381 1382 static void rcu_gp_slow(int delay) 1383 { 1384 if (delay > 0 && 1385 !(rcu_seq_ctr(rcu_state.gp_seq) % 1386 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1387 schedule_timeout_uninterruptible(delay); 1388 } 1389 1390 /* 1391 * Initialize a new grace period. Return false if no grace period required. 1392 */ 1393 static bool rcu_gp_init(void) 1394 { 1395 unsigned long flags; 1396 unsigned long oldmask; 1397 unsigned long mask; 1398 struct rcu_data *rdp; 1399 struct rcu_node *rnp = rcu_get_root(); 1400 1401 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1402 raw_spin_lock_irq_rcu_node(rnp); 1403 if (!READ_ONCE(rcu_state.gp_flags)) { 1404 /* Spurious wakeup, tell caller to go back to sleep. */ 1405 raw_spin_unlock_irq_rcu_node(rnp); 1406 return false; 1407 } 1408 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ 1409 1410 if (WARN_ON_ONCE(rcu_gp_in_progress())) { 1411 /* 1412 * Grace period already in progress, don't start another. 1413 * Not supposed to be able to happen. 1414 */ 1415 raw_spin_unlock_irq_rcu_node(rnp); 1416 return false; 1417 } 1418 1419 /* Advance to a new grace period and initialize state. */ 1420 record_gp_stall_check_time(); 1421 /* Record GP times before starting GP, hence rcu_seq_start(). */ 1422 rcu_seq_start(&rcu_state.gp_seq); 1423 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); 1424 raw_spin_unlock_irq_rcu_node(rnp); 1425 1426 /* 1427 * Apply per-leaf buffered online and offline operations to the 1428 * rcu_node tree. Note that this new grace period need not wait 1429 * for subsequent online CPUs, and that quiescent-state forcing 1430 * will handle subsequent offline CPUs. 
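 * In other words, CPU-hotplug operations buffer their changes in the
 * leaves' ->qsmaskinitnext fields, and the loop below copies those into
 * ->qsmaskinit under ->ofl_lock, so the set of CPUs this grace period
 * must wait for stays stable for the grace period's full duration.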
1431 */ 1432 rcu_state.gp_state = RCU_GP_ONOFF; 1433 rcu_for_each_leaf_node(rnp) { 1434 raw_spin_lock(&rcu_state.ofl_lock); 1435 raw_spin_lock_irq_rcu_node(rnp); 1436 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1437 !rnp->wait_blkd_tasks) { 1438 /* Nothing to do on this leaf rcu_node structure. */ 1439 raw_spin_unlock_irq_rcu_node(rnp); 1440 raw_spin_unlock(&rcu_state.ofl_lock); 1441 continue; 1442 } 1443 1444 /* Record old state, apply changes to ->qsmaskinit field. */ 1445 oldmask = rnp->qsmaskinit; 1446 rnp->qsmaskinit = rnp->qsmaskinitnext; 1447 1448 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1449 if (!oldmask != !rnp->qsmaskinit) { 1450 if (!oldmask) { /* First online CPU for rcu_node. */ 1451 if (!rnp->wait_blkd_tasks) /* Ever offline? */ 1452 rcu_init_new_rnp(rnp); 1453 } else if (rcu_preempt_has_tasks(rnp)) { 1454 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1455 } else { /* Last offline CPU and can propagate. */ 1456 rcu_cleanup_dead_rnp(rnp); 1457 } 1458 } 1459 1460 /* 1461 * If all waited-on tasks from prior grace period are 1462 * done, and if all this rcu_node structure's CPUs are 1463 * still offline, propagate up the rcu_node tree and 1464 * clear ->wait_blkd_tasks. Otherwise, if one of this 1465 * rcu_node structure's CPUs has since come back online, 1466 * simply clear ->wait_blkd_tasks. 1467 */ 1468 if (rnp->wait_blkd_tasks && 1469 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1470 rnp->wait_blkd_tasks = false; 1471 if (!rnp->qsmaskinit) 1472 rcu_cleanup_dead_rnp(rnp); 1473 } 1474 1475 raw_spin_unlock_irq_rcu_node(rnp); 1476 raw_spin_unlock(&rcu_state.ofl_lock); 1477 } 1478 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1479 1480 /* 1481 * Set the quiescent-state-needed bits in all the rcu_node 1482 * structures for all currently online CPUs in breadth-first 1483 * order, starting from the root rcu_node structure, relying on the 1484 * layout of the tree within the rcu_state.node[] array. Note that 1485 * other CPUs will access only the leaves of the hierarchy, thus 1486 * seeing that no grace period is in progress, at least until the 1487 * corresponding leaf node has been initialized. 1488 * 1489 * The grace period cannot complete until the initialization 1490 * process finishes, because this kthread handles both. 1491 */ 1492 rcu_state.gp_state = RCU_GP_INIT; 1493 rcu_for_each_node_breadth_first(rnp) { 1494 rcu_gp_slow(gp_init_delay); 1495 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1496 rdp = this_cpu_ptr(&rcu_data); 1497 rcu_preempt_check_blocked_tasks(rnp); 1498 rnp->qsmask = rnp->qsmaskinit; 1499 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1500 if (rnp == rdp->mynode) 1501 (void)__note_gp_changes(rnp, rdp); 1502 rcu_preempt_boost_start_gp(rnp); 1503 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1504 rnp->level, rnp->grplo, 1505 rnp->grphi, rnp->qsmask); 1506 /* Quiescent states for tasks on any now-offline CPUs. */ 1507 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1508 rnp->rcu_gp_init_mask = mask; 1509 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1510 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1511 else 1512 raw_spin_unlock_irq_rcu_node(rnp); 1513 cond_resched_tasks_rcu_qs(); 1514 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1515 } 1516 1517 return true; 1518 } 1519 1520 /* 1521 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1522 * time. 
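 * Returns true, waking the grace-period kthread, either because someone
 * has requested a force-quiescent-state scan (RCU_GP_FLAG_FQS) or
 * because the current grace period has completed; otherwise the kthread
 * keeps sleeping until its FQS timeout expires.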
1523 */ 1524 static bool rcu_gp_fqs_check_wake(int *gfp) 1525 { 1526 struct rcu_node *rnp = rcu_get_root(); 1527 1528 /* Someone like call_rcu() requested a force-quiescent-state scan. */ 1529 *gfp = READ_ONCE(rcu_state.gp_flags); 1530 if (*gfp & RCU_GP_FLAG_FQS) 1531 return true; 1532 1533 /* The current grace period has completed. */ 1534 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1535 return true; 1536 1537 return false; 1538 } 1539 1540 /* 1541 * Do one round of quiescent-state forcing. 1542 */ 1543 static void rcu_gp_fqs(bool first_time) 1544 { 1545 struct rcu_node *rnp = rcu_get_root(); 1546 1547 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1548 rcu_state.n_force_qs++; 1549 if (first_time) { 1550 /* Collect dyntick-idle snapshots. */ 1551 force_qs_rnp(dyntick_save_progress_counter); 1552 } else { 1553 /* Handle dyntick-idle and offline CPUs. */ 1554 force_qs_rnp(rcu_implicit_dynticks_qs); 1555 } 1556 /* Clear flag to prevent immediate re-entry. */ 1557 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 1558 raw_spin_lock_irq_rcu_node(rnp); 1559 WRITE_ONCE(rcu_state.gp_flags, 1560 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); 1561 raw_spin_unlock_irq_rcu_node(rnp); 1562 } 1563 } 1564 1565 /* 1566 * Loop doing repeated quiescent-state forcing until the grace period ends. 1567 */ 1568 static void rcu_gp_fqs_loop(void) 1569 { 1570 bool first_gp_fqs; 1571 int gf; 1572 unsigned long j; 1573 int ret; 1574 struct rcu_node *rnp = rcu_get_root(); 1575 1576 first_gp_fqs = true; 1577 j = READ_ONCE(jiffies_till_first_fqs); 1578 ret = 0; 1579 for (;;) { 1580 if (!ret) { 1581 rcu_state.jiffies_force_qs = jiffies + j; 1582 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1583 jiffies + (j ? 3 * j : 2)); 1584 } 1585 trace_rcu_grace_period(rcu_state.name, 1586 READ_ONCE(rcu_state.gp_seq), 1587 TPS("fqswait")); 1588 rcu_state.gp_state = RCU_GP_WAIT_FQS; 1589 ret = swait_event_idle_timeout_exclusive( 1590 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); 1591 rcu_state.gp_state = RCU_GP_DOING_FQS; 1592 /* Locking provides needed memory barriers. */ 1593 /* If grace period done, leave loop. */ 1594 if (!READ_ONCE(rnp->qsmask) && 1595 !rcu_preempt_blocked_readers_cgp(rnp)) 1596 break; 1597 /* If time for quiescent-state forcing, do it. */ 1598 if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) || 1599 (gf & RCU_GP_FLAG_FQS)) { 1600 trace_rcu_grace_period(rcu_state.name, 1601 READ_ONCE(rcu_state.gp_seq), 1602 TPS("fqsstart")); 1603 rcu_gp_fqs(first_gp_fqs); 1604 first_gp_fqs = false; 1605 trace_rcu_grace_period(rcu_state.name, 1606 READ_ONCE(rcu_state.gp_seq), 1607 TPS("fqsend")); 1608 cond_resched_tasks_rcu_qs(); 1609 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1610 ret = 0; /* Force full wait till next FQS. */ 1611 j = READ_ONCE(jiffies_till_next_fqs); 1612 } else { 1613 /* Deal with stray signal. */ 1614 cond_resched_tasks_rcu_qs(); 1615 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1616 WARN_ON(signal_pending(current)); 1617 trace_rcu_grace_period(rcu_state.name, 1618 READ_ONCE(rcu_state.gp_seq), 1619 TPS("fqswaitsig")); 1620 ret = 1; /* Keep old FQS timing. */ 1621 j = jiffies; 1622 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 1623 j = 1; 1624 else 1625 j = rcu_state.jiffies_force_qs - j; 1626 } 1627 } 1628 } 1629 1630 /* 1631 * Clean up after the old grace period. 
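 * Cleanup runs in two passes: the completed ->gp_seq value is first
 * propagated breadth-first through the rcu_node tree so that other CPUs
 * can start invoking their callbacks, and only then is
 * rcu_state.gp_seq itself advanced and the state machine returned to
 * RCU_GP_IDLE (or immediately re-armed if further grace periods have
 * been requested in the meantime).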
1632 */ 1633 static void rcu_gp_cleanup(void) 1634 { 1635 unsigned long gp_duration; 1636 bool needgp = false; 1637 unsigned long new_gp_seq; 1638 struct rcu_data *rdp; 1639 struct rcu_node *rnp = rcu_get_root(); 1640 struct swait_queue_head *sq; 1641 1642 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1643 raw_spin_lock_irq_rcu_node(rnp); 1644 rcu_state.gp_end = jiffies; 1645 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 1646 if (gp_duration > rcu_state.gp_max) 1647 rcu_state.gp_max = gp_duration; 1648 1649 /* 1650 * We know the grace period is complete, but to everyone else 1651 * it appears to still be ongoing. But it is also the case 1652 * that to everyone else it looks like there is nothing that 1653 * they can do to advance the grace period. It is therefore 1654 * safe for us to drop the lock in order to mark the grace 1655 * period as completed in all of the rcu_node structures. 1656 */ 1657 raw_spin_unlock_irq_rcu_node(rnp); 1658 1659 /* 1660 * Propagate new ->gp_seq value to rcu_node structures so that 1661 * other CPUs don't have to wait until the start of the next grace 1662 * period to process their callbacks. This also avoids some nasty 1663 * RCU grace-period initialization races by forcing the end of 1664 * the current grace period to be completely recorded in all of 1665 * the rcu_node structures before the beginning of the next grace 1666 * period is recorded in any of the rcu_node structures. 1667 */ 1668 new_gp_seq = rcu_state.gp_seq; 1669 rcu_seq_end(&new_gp_seq); 1670 rcu_for_each_node_breadth_first(rnp) { 1671 raw_spin_lock_irq_rcu_node(rnp); 1672 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 1673 dump_blkd_tasks(rnp, 10); 1674 WARN_ON_ONCE(rnp->qsmask); 1675 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 1676 rdp = this_cpu_ptr(&rcu_data); 1677 if (rnp == rdp->mynode) 1678 needgp = __note_gp_changes(rnp, rdp) || needgp; 1679 /* smp_mb() provided by prior unlock-lock pair. */ 1680 needgp = rcu_future_gp_cleanup(rnp) || needgp; 1681 sq = rcu_nocb_gp_get(rnp); 1682 raw_spin_unlock_irq_rcu_node(rnp); 1683 rcu_nocb_gp_cleanup(sq); 1684 cond_resched_tasks_rcu_qs(); 1685 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1686 rcu_gp_slow(gp_cleanup_delay); 1687 } 1688 rnp = rcu_get_root(); 1689 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 1690 1691 /* Declare grace period done, trace first to use old GP number. */ 1692 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 1693 rcu_seq_end(&rcu_state.gp_seq); 1694 rcu_state.gp_state = RCU_GP_IDLE; 1695 /* Check for GP requests since above loop. */ 1696 rdp = this_cpu_ptr(&rcu_data); 1697 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 1698 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 1699 TPS("CleanupMore")); 1700 needgp = true; 1701 } 1702 /* Advance CBs to reduce false positives below. */ 1703 if (!rcu_accelerate_cbs(rnp, rdp) && needgp) { 1704 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 1705 rcu_state.gp_req_activity = jiffies; 1706 trace_rcu_grace_period(rcu_state.name, 1707 READ_ONCE(rcu_state.gp_seq), 1708 TPS("newreq")); 1709 } else { 1710 WRITE_ONCE(rcu_state.gp_flags, 1711 rcu_state.gp_flags & RCU_GP_FLAG_INIT); 1712 } 1713 raw_spin_unlock_irq_rcu_node(rnp); 1714 } 1715 1716 /* 1717 * Body of kthread that handles grace periods. 1718 */ 1719 static int __noreturn rcu_gp_kthread(void *unused) 1720 { 1721 rcu_bind_gp_kthread(); 1722 for (;;) { 1723 1724 /* Handle grace-period start. 
*/ 1725 for (;;) { 1726 trace_rcu_grace_period(rcu_state.name, 1727 READ_ONCE(rcu_state.gp_seq), 1728 TPS("reqwait")); 1729 rcu_state.gp_state = RCU_GP_WAIT_GPS; 1730 swait_event_idle_exclusive(rcu_state.gp_wq, 1731 READ_ONCE(rcu_state.gp_flags) & 1732 RCU_GP_FLAG_INIT); 1733 rcu_state.gp_state = RCU_GP_DONE_GPS; 1734 /* Locking provides needed memory barrier. */ 1735 if (rcu_gp_init()) 1736 break; 1737 cond_resched_tasks_rcu_qs(); 1738 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1739 WARN_ON(signal_pending(current)); 1740 trace_rcu_grace_period(rcu_state.name, 1741 READ_ONCE(rcu_state.gp_seq), 1742 TPS("reqwaitsig")); 1743 } 1744 1745 /* Handle quiescent-state forcing. */ 1746 rcu_gp_fqs_loop(); 1747 1748 /* Handle grace-period end. */ 1749 rcu_state.gp_state = RCU_GP_CLEANUP; 1750 rcu_gp_cleanup(); 1751 rcu_state.gp_state = RCU_GP_CLEANED; 1752 } 1753 } 1754 1755 /* 1756 * Report a full set of quiescent states to the rcu_state data structure. 1757 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 1758 * another grace period is required. Whether we wake the grace-period 1759 * kthread or it awakens itself for the next round of quiescent-state 1760 * forcing, that kthread will clean up after the just-completed grace 1761 * period. Note that the caller must hold rnp->lock, which is released 1762 * before return. 1763 */ 1764 static void rcu_report_qs_rsp(unsigned long flags) 1765 __releases(rcu_get_root()->lock) 1766 { 1767 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 1768 WARN_ON_ONCE(!rcu_gp_in_progress()); 1769 WRITE_ONCE(rcu_state.gp_flags, 1770 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 1771 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 1772 rcu_gp_kthread_wake(); 1773 } 1774 1775 /* 1776 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 1777 * Allows quiescent states for a group of CPUs to be reported at one go 1778 * to the specified rcu_node structure, though all the CPUs in the group 1779 * must be represented by the same rcu_node structure (which need not be a 1780 * leaf rcu_node structure, though it often will be). The gps parameter 1781 * is the grace-period snapshot, which means that the quiescent states 1782 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 1783 * must be held upon entry, and it is released before return. 1784 * 1785 * As a special case, if mask is zero, the bit-already-cleared check is 1786 * disabled. This allows propagating quiescent state due to resumed tasks 1787 * during grace-period initialization. 1788 */ 1789 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 1790 unsigned long gps, unsigned long flags) 1791 __releases(rnp->lock) 1792 { 1793 unsigned long oldmask = 0; 1794 struct rcu_node *rnp_c; 1795 1796 raw_lockdep_assert_held_rcu_node(rnp); 1797 1798 /* Walk up the rcu_node hierarchy. */ 1799 for (;;) { 1800 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 1801 1802 /* 1803 * Our bit has already been cleared, or the 1804 * relevant grace period is already over, so done. 1805 */ 1806 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1807 return; 1808 } 1809 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! 
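 * (oldmask is the ->qsmask of the rcu_node structure the walk just came up
 * from, sampled after this level's lock was acquired. The walk ascends only
 * after the child's last quiescent-state bit has been cleared and no readers
 * remain blocked there, so any nonzero value indicates broken bookkeeping.)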
*/ 1810 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 1811 rcu_preempt_blocked_readers_cgp(rnp)); 1812 rnp->qsmask &= ~mask; 1813 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 1814 mask, rnp->qsmask, rnp->level, 1815 rnp->grplo, rnp->grphi, 1816 !!rnp->gp_tasks); 1817 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 1818 1819 /* Other bits still set at this level, so done. */ 1820 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1821 return; 1822 } 1823 rnp->completedqs = rnp->gp_seq; 1824 mask = rnp->grpmask; 1825 if (rnp->parent == NULL) { 1826 1827 /* No more levels. Exit loop holding root lock. */ 1828 1829 break; 1830 } 1831 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1832 rnp_c = rnp; 1833 rnp = rnp->parent; 1834 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1835 oldmask = rnp_c->qsmask; 1836 } 1837 1838 /* 1839 * Get here if we are the last CPU to pass through a quiescent 1840 * state for this grace period. Invoke rcu_report_qs_rsp() 1841 * to clean up and start the next grace period if one is needed. 1842 */ 1843 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 1844 } 1845 1846 /* 1847 * Record a quiescent state for all tasks that were previously queued 1848 * on the specified rcu_node structure and that were blocking the current 1849 * RCU grace period. The caller must hold the corresponding rnp->lock with 1850 * irqs disabled, and this lock is released upon return, but irqs remain 1851 * disabled. 1852 */ 1853 static void __maybe_unused 1854 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 1855 __releases(rnp->lock) 1856 { 1857 unsigned long gps; 1858 unsigned long mask; 1859 struct rcu_node *rnp_p; 1860 1861 raw_lockdep_assert_held_rcu_node(rnp); 1862 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) || 1863 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 1864 rnp->qsmask != 0) { 1865 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1866 return; /* Still need more quiescent states! */ 1867 } 1868 1869 rnp->completedqs = rnp->gp_seq; 1870 rnp_p = rnp->parent; 1871 if (rnp_p == NULL) { 1872 /* 1873 * Only one rcu_node structure in the tree, so don't 1874 * try to report up to its nonexistent parent! 1875 */ 1876 rcu_report_qs_rsp(flags); 1877 return; 1878 } 1879 1880 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 1881 gps = rnp->gp_seq; 1882 mask = rnp->grpmask; 1883 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1884 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 1885 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 1886 } 1887 1888 /* 1889 * Record a quiescent state for the specified CPU to that CPU's rcu_data 1890 * structure. This must be called from the specified CPU. 1891 */ 1892 static void 1893 rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) 1894 { 1895 unsigned long flags; 1896 unsigned long mask; 1897 bool needwake; 1898 struct rcu_node *rnp; 1899 1900 rnp = rdp->mynode; 1901 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1902 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 1903 rdp->gpwrap) { 1904 1905 /* 1906 * The grace period in which this quiescent state was 1907 * recorded has ended, so don't report it upwards. 1908 * We will instead need a new quiescent state that lies 1909 * within the current grace period. 1910 */ 1911 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. 
*/ 1912 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1913 return; 1914 } 1915 mask = rdp->grpmask; 1916 rdp->core_needs_qs = false; 1917 if ((rnp->qsmask & mask) == 0) { 1918 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1919 } else { 1920 /* 1921 * This GP can't end until cpu checks in, so all of our 1922 * callbacks can be processed during the next GP. 1923 */ 1924 needwake = rcu_accelerate_cbs(rnp, rdp); 1925 1926 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1927 /* ^^^ Released rnp->lock */ 1928 if (needwake) 1929 rcu_gp_kthread_wake(); 1930 } 1931 } 1932 1933 /* 1934 * Check to see if there is a new grace period of which this CPU 1935 * is not yet aware, and if so, set up local rcu_data state for it. 1936 * Otherwise, see if this CPU has just passed through its first 1937 * quiescent state for this grace period, and record that fact if so. 1938 */ 1939 static void 1940 rcu_check_quiescent_state(struct rcu_data *rdp) 1941 { 1942 /* Check for grace-period ends and beginnings. */ 1943 note_gp_changes(rdp); 1944 1945 /* 1946 * Does this CPU still need to do its part for current grace period? 1947 * If no, return and let the other CPUs do their part as well. 1948 */ 1949 if (!rdp->core_needs_qs) 1950 return; 1951 1952 /* 1953 * Was there a quiescent state since the beginning of the grace 1954 * period? If no, then exit and wait for the next call. 1955 */ 1956 if (rdp->cpu_no_qs.b.norm) 1957 return; 1958 1959 /* 1960 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 1961 * judge of that). 1962 */ 1963 rcu_report_qs_rdp(rdp->cpu, rdp); 1964 } 1965 1966 /* 1967 * Near the end of the offline process. Trace the fact that this CPU 1968 * is going offline. 1969 */ 1970 int rcutree_dying_cpu(unsigned int cpu) 1971 { 1972 bool blkd; 1973 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1974 struct rcu_node *rnp = rdp->mynode; 1975 1976 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 1977 return 0; 1978 1979 blkd = !!(rnp->qsmask & rdp->grpmask); 1980 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, 1981 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 1982 return 0; 1983 } 1984 1985 /* 1986 * All CPUs for the specified rcu_node structure have gone offline, 1987 * and all tasks that were preempted within an RCU read-side critical 1988 * section while running on one of those CPUs have since exited their RCU 1989 * read-side critical section. Some other CPU is reporting this fact with 1990 * the specified rcu_node structure's ->lock held and interrupts disabled. 1991 * This function therefore goes up the tree of rcu_node structures, 1992 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 1993 * the leaf rcu_node structure's ->qsmaskinit field has already been 1994 * updated. 1995 * 1996 * This function does check that the specified rcu_node structure has 1997 * all CPUs offline and no blocked tasks, so it is OK to invoke it 1998 * prematurely. That said, invoking it after the fact will cost you 1999 * a needless lock acquisition. So once it has done its work, don't 2000 * invoke it again. 2001 */ 2002 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2003 { 2004 long mask; 2005 struct rcu_node *rnp = rnp_leaf; 2006 2007 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2008 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2009 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 2010 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 2011 return; 2012 for (;;) { 2013 mask = rnp->grpmask; 2014 rnp = rnp->parent; 2015 if (!rnp) 2016 break; 2017 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. 
*/ 2018 rnp->qsmaskinit &= ~mask; 2019 /* Between grace periods, so better already be zero! */ 2020 WARN_ON_ONCE(rnp->qsmask); 2021 if (rnp->qsmaskinit) { 2022 raw_spin_unlock_rcu_node(rnp); 2023 /* irqs remain disabled. */ 2024 return; 2025 } 2026 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2027 } 2028 } 2029 2030 /* 2031 * The CPU has been completely removed, and some other CPU is reporting 2032 * this fact from process context. Do the remainder of the cleanup. 2033 * There can only be one CPU hotplug operation at a time, so no need for 2034 * explicit locking. 2035 */ 2036 int rcutree_dead_cpu(unsigned int cpu) 2037 { 2038 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2039 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2040 2041 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2042 return 0; 2043 2044 /* Adjust any no-longer-needed kthreads. */ 2045 rcu_boost_kthread_setaffinity(rnp, -1); 2046 /* Do any needed no-CB deferred wakeups from this CPU. */ 2047 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); 2048 return 0; 2049 } 2050 2051 /* 2052 * Invoke any RCU callbacks that have made it to the end of their grace 2053 * period. Throttle as specified by rdp->blimit. 2054 */ 2055 static void rcu_do_batch(struct rcu_data *rdp) 2056 { 2057 unsigned long flags; 2058 struct rcu_head *rhp; 2059 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2060 long bl, count; 2061 2062 /* If no callbacks are ready, just return. */ 2063 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2064 trace_rcu_batch_start(rcu_state.name, 2065 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2066 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2067 trace_rcu_batch_end(rcu_state.name, 0, 2068 !rcu_segcblist_empty(&rdp->cblist), 2069 need_resched(), is_idle_task(current), 2070 rcu_is_callbacks_kthread()); 2071 return; 2072 } 2073 2074 /* 2075 * Extract the list of ready callbacks, disabling interrupts to prevent 2076 * races with call_rcu() from interrupt handlers. Leave the 2077 * callback counts, as rcu_barrier() needs to be conservative. 2078 */ 2079 local_irq_save(flags); 2080 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2081 bl = rdp->blimit; 2082 trace_rcu_batch_start(rcu_state.name, 2083 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2084 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2085 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2086 local_irq_restore(flags); 2087 2088 /* Invoke callbacks. */ 2089 rhp = rcu_cblist_dequeue(&rcl); 2090 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2091 debug_rcu_head_unqueue(rhp); 2092 if (__rcu_reclaim(rcu_state.name, rhp)) 2093 rcu_cblist_dequeued_lazy(&rcl); 2094 /* 2095 * Stop only if limit reached and CPU has something to do. 2096 * Note: The rcl structure counts down from zero. 2097 */ 2098 if (-rcl.len >= bl && 2099 (need_resched() || 2100 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2101 break; 2102 } 2103 2104 local_irq_save(flags); 2105 count = -rcl.len; 2106 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2107 is_idle_task(current), rcu_is_callbacks_kthread()); 2108 2109 /* Update counts and requeue any remaining callbacks. */ 2110 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2111 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2112 rcu_segcblist_insert_count(&rdp->cblist, &rcl); 2113 2114 /* Reinstate batch limit if we have worked down the excess.
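 * (__call_rcu_core() raises ->blimit to LONG_MAX when the callback backlog
 * exceeds qhimark; once the backlog has drained to no more than qlowmark
 * callbacks, the default batch limit is restored below.)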
*/ 2115 count = rcu_segcblist_n_cbs(&rdp->cblist); 2116 if (rdp->blimit == LONG_MAX && count <= qlowmark) 2117 rdp->blimit = blimit; 2118 2119 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2120 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2121 rdp->qlen_last_fqs_check = 0; 2122 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2123 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2124 rdp->qlen_last_fqs_check = count; 2125 2126 /* 2127 * The following usually indicates a double call_rcu(). To track 2128 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2129 */ 2130 WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0)); 2131 2132 local_irq_restore(flags); 2133 2134 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2135 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2136 invoke_rcu_core(); 2137 } 2138 2139 /* 2140 * This function is invoked from each scheduling-clock interrupt, 2141 * and checks to see if this CPU is in a non-context-switch quiescent 2142 * state, for example, user mode or idle loop. It also schedules RCU 2143 * core processing. If the current grace period has gone on too long, 2144 * it will ask the scheduler to manufacture a context switch for the sole 2145 * purpose of providing the needed quiescent state. 2146 */ 2147 void rcu_sched_clock_irq(int user) 2148 { 2149 trace_rcu_utilization(TPS("Start scheduler-tick")); 2150 raw_cpu_inc(rcu_data.ticks_this_gp); 2151 /* The load-acquire pairs with the store-release setting to true. */ 2152 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2153 /* Idle and userspace execution already are quiescent states. */ 2154 if (!rcu_is_cpu_rrupt_from_idle() && !user) { 2155 set_tsk_need_resched(current); 2156 set_preempt_need_resched(); 2157 } 2158 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2159 } 2160 rcu_flavor_sched_clock_irq(user); 2161 if (rcu_pending()) 2162 invoke_rcu_core(); 2163 2164 trace_rcu_utilization(TPS("End scheduler-tick")); 2165 } 2166 2167 /* 2168 * Scan the leaf rcu_node structures. For each structure on which all 2169 * CPUs have reported a quiescent state and on which there are tasks 2170 * blocking the current grace period, initiate RCU priority boosting. 2171 * Otherwise, invoke the specified function to check dyntick state for 2172 * each CPU that has not yet reported a quiescent state. 2173 */ 2174 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2175 { 2176 int cpu; 2177 unsigned long flags; 2178 unsigned long mask; 2179 struct rcu_node *rnp; 2180 2181 rcu_for_each_leaf_node(rnp) { 2182 cond_resched_tasks_rcu_qs(); 2183 mask = 0; 2184 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2185 if (rnp->qsmask == 0) { 2186 if (!IS_ENABLED(CONFIG_PREEMPT) || 2187 rcu_preempt_blocked_readers_cgp(rnp)) { 2188 /* 2189 * No point in scanning bits because they 2190 * are all zero. But we might need to 2191 * priority-boost blocked readers. 2192 */ 2193 rcu_initiate_boost(rnp, flags); 2194 /* rcu_initiate_boost() releases rnp->lock */ 2195 continue; 2196 } 2197 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2198 continue; 2199 } 2200 for_each_leaf_node_possible_cpu(rnp, cpu) { 2201 unsigned long bit = leaf_node_cpu_bit(rnp, cpu); 2202 if ((rnp->qsmask & bit) != 0) { 2203 if (f(per_cpu_ptr(&rcu_data, cpu))) 2204 mask |= bit; 2205 } 2206 } 2207 if (mask != 0) { 2208 /* Idle/offline CPUs, report (releases rnp->lock).
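 * (At this point mask holds the bits of exactly those CPUs that the scan
 * function judged to be idle or offline, so their quiescent states can be
 * reported on their behalf.)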
*/ 2209 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2210 } else { 2211 /* Nothing to do here, so just drop the lock. */ 2212 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2213 } 2214 } 2215 } 2216 2217 /* 2218 * Force quiescent states on reluctant CPUs, and also detect which 2219 * CPUs are in dyntick-idle mode. 2220 */ 2221 void rcu_force_quiescent_state(void) 2222 { 2223 unsigned long flags; 2224 bool ret; 2225 struct rcu_node *rnp; 2226 struct rcu_node *rnp_old = NULL; 2227 2228 /* Funnel through hierarchy to reduce memory contention. */ 2229 rnp = __this_cpu_read(rcu_data.mynode); 2230 for (; rnp != NULL; rnp = rnp->parent) { 2231 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2232 !raw_spin_trylock(&rnp->fqslock); 2233 if (rnp_old != NULL) 2234 raw_spin_unlock(&rnp_old->fqslock); 2235 if (ret) 2236 return; 2237 rnp_old = rnp; 2238 } 2239 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2240 2241 /* Reached the root of the rcu_node tree, acquire lock. */ 2242 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2243 raw_spin_unlock(&rnp_old->fqslock); 2244 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2245 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2246 return; /* Someone beat us to it. */ 2247 } 2248 WRITE_ONCE(rcu_state.gp_flags, 2249 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2250 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2251 rcu_gp_kthread_wake(); 2252 } 2253 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2254 2255 /* Perform RCU core processing work for the current CPU. */ 2256 static __latent_entropy void rcu_core(struct softirq_action *unused) 2257 { 2258 unsigned long flags; 2259 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2260 struct rcu_node *rnp = rdp->mynode; 2261 2262 if (cpu_is_offline(smp_processor_id())) 2263 return; 2264 trace_rcu_utilization(TPS("Start RCU core")); 2265 WARN_ON_ONCE(!rdp->beenonline); 2266 2267 /* Report any deferred quiescent states if preemption enabled. */ 2268 if (!(preempt_count() & PREEMPT_MASK)) { 2269 rcu_preempt_deferred_qs(current); 2270 } else if (rcu_preempt_need_deferred_qs(current)) { 2271 set_tsk_need_resched(current); 2272 set_preempt_need_resched(); 2273 } 2274 2275 /* Update RCU state based on any recent quiescent states. */ 2276 rcu_check_quiescent_state(rdp); 2277 2278 /* No grace period and unregistered callbacks? */ 2279 if (!rcu_gp_in_progress() && 2280 rcu_segcblist_is_enabled(&rdp->cblist)) { 2281 local_irq_save(flags); 2282 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2283 rcu_accelerate_cbs_unlocked(rnp, rdp); 2284 local_irq_restore(flags); 2285 } 2286 2287 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2288 2289 /* If there are callbacks ready, invoke them. */ 2290 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2291 invoke_rcu_callbacks(rdp); 2292 2293 /* Do any needed deferred wakeups of rcuo kthreads. */ 2294 do_nocb_deferred_wakeup(rdp); 2295 trace_rcu_utilization(TPS("End RCU core")); 2296 } 2297 2298 /* 2299 * Schedule RCU callback invocation. If the running implementation of RCU 2300 * does not support RCU priority boosting, just do a direct call, otherwise 2301 * wake up the per-CPU kernel kthread. Note that because we are running 2302 * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task 2303 * cannot disappear out from under us. 
2304 */ 2305 static void invoke_rcu_callbacks(struct rcu_data *rdp) 2306 { 2307 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) 2308 return; 2309 if (likely(!rcu_state.boost)) { 2310 rcu_do_batch(rdp); 2311 return; 2312 } 2313 invoke_rcu_callbacks_kthread(); 2314 } 2315 2316 static void invoke_rcu_core(void) 2317 { 2318 if (cpu_online(smp_processor_id())) 2319 raise_softirq(RCU_SOFTIRQ); 2320 } 2321 2322 /* 2323 * Handle any core-RCU processing required by a call_rcu() invocation. 2324 */ 2325 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2326 unsigned long flags) 2327 { 2328 /* 2329 * If called from an extended quiescent state, invoke the RCU 2330 * core in order to force a re-evaluation of RCU's idleness. 2331 */ 2332 if (!rcu_is_watching()) 2333 invoke_rcu_core(); 2334 2335 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2336 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2337 return; 2338 2339 /* 2340 * Force the grace period if too many callbacks or too long waiting. 2341 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 2342 * if some other CPU has recently done so. Also, don't bother 2343 * invoking rcu_force_quiescent_state() if the newly enqueued callback 2344 * is the only one waiting for a grace period to complete. 2345 */ 2346 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2347 rdp->qlen_last_fqs_check + qhimark)) { 2348 2349 /* Are we ignoring a completed grace period? */ 2350 note_gp_changes(rdp); 2351 2352 /* Start a new grace period if one not already started. */ 2353 if (!rcu_gp_in_progress()) { 2354 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 2355 } else { 2356 /* Give the grace period a kick. */ 2357 rdp->blimit = LONG_MAX; 2358 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && 2359 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2360 rcu_force_quiescent_state(); 2361 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2362 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2363 } 2364 } 2365 } 2366 2367 /* 2368 * RCU callback function to leak a callback. 2369 */ 2370 static void rcu_leak_callback(struct rcu_head *rhp) 2371 { 2372 } 2373 2374 /* 2375 * Helper function for call_rcu() and friends. The cpu argument will 2376 * normally be -1, indicating "currently running CPU". It may specify 2377 * a CPU only if that CPU is a no-CBs CPU. Currently, only rcu_barrier() 2378 * is expected to specify a CPU. 2379 */ 2380 static void 2381 __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy) 2382 { 2383 unsigned long flags; 2384 struct rcu_data *rdp; 2385 2386 /* Misaligned rcu_head! */ 2387 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 2388 2389 if (debug_rcu_head_queue(head)) { 2390 /* 2391 * Probable double call_rcu(), so leak the callback. 2392 * Use rcu:rcu_callback trace event to find the previous 2393 * time callback was passed to __call_rcu(). 2394 */ 2395 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", 2396 head, head->func); 2397 WRITE_ONCE(head->func, rcu_leak_callback); 2398 return; 2399 } 2400 head->func = func; 2401 head->next = NULL; 2402 local_irq_save(flags); 2403 rdp = this_cpu_ptr(&rcu_data); 2404 2405 /* Add the callback to our list. */ 2406 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) { 2407 int offline; 2408 2409 if (cpu != -1) 2410 rdp = per_cpu_ptr(&rcu_data, cpu); 2411 if (likely(rdp->mynode)) { 2412 /* Post-boot, so this should be for a no-CBs CPU. 
*/ 2413 offline = !__call_rcu_nocb(rdp, head, lazy, flags); 2414 WARN_ON_ONCE(offline); 2415 /* Offline CPU, _call_rcu() illegal, leak callback. */ 2416 local_irq_restore(flags); 2417 return; 2418 } 2419 /* 2420 * Very early boot, before rcu_init(). Initialize if needed 2421 * and then drop through to queue the callback. 2422 */ 2423 WARN_ON_ONCE(cpu != -1); 2424 WARN_ON_ONCE(!rcu_is_watching()); 2425 if (rcu_segcblist_empty(&rdp->cblist)) 2426 rcu_segcblist_init(&rdp->cblist); 2427 } 2428 rcu_segcblist_enqueue(&rdp->cblist, head, lazy); 2429 if (__is_kfree_rcu_offset((unsigned long)func)) 2430 trace_rcu_kfree_callback(rcu_state.name, head, 2431 (unsigned long)func, 2432 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2433 rcu_segcblist_n_cbs(&rdp->cblist)); 2434 else 2435 trace_rcu_callback(rcu_state.name, head, 2436 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2437 rcu_segcblist_n_cbs(&rdp->cblist)); 2438 2439 /* Go handle any RCU core processing required. */ 2440 __call_rcu_core(rdp, head, flags); 2441 local_irq_restore(flags); 2442 } 2443 2444 /** 2445 * call_rcu() - Queue an RCU callback for invocation after a grace period. 2446 * @head: structure to be used for queueing the RCU updates. 2447 * @func: actual callback function to be invoked after the grace period 2448 * 2449 * The callback function will be invoked some time after a full grace 2450 * period elapses, in other words after all pre-existing RCU read-side 2451 * critical sections have completed. However, the callback function 2452 * might well execute concurrently with RCU read-side critical sections 2453 * that started after call_rcu() was invoked. RCU read-side critical 2454 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and 2455 * may be nested. In addition, regions of code across which interrupts, 2456 * preemption, or softirqs have been disabled also serve as RCU read-side 2457 * critical sections. This includes hardware interrupt handlers, softirq 2458 * handlers, and NMI handlers. 2459 * 2460 * Note that all CPUs must agree that the grace period extended beyond 2461 * all pre-existing RCU read-side critical section. On systems with more 2462 * than one CPU, this means that when "func()" is invoked, each CPU is 2463 * guaranteed to have executed a full memory barrier since the end of its 2464 * last RCU read-side critical section whose beginning preceded the call 2465 * to call_rcu(). It also means that each CPU executing an RCU read-side 2466 * critical section that continues beyond the start of "func()" must have 2467 * executed a memory barrier after the call_rcu() but before the beginning 2468 * of that RCU read-side critical section. Note that these guarantees 2469 * include CPUs that are offline, idle, or executing in user mode, as 2470 * well as CPUs that are executing in the kernel. 2471 * 2472 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 2473 * resulting RCU callback function "func()", then both CPU A and CPU B are 2474 * guaranteed to execute a full memory barrier during the time interval 2475 * between the call to call_rcu() and the invocation of "func()" -- even 2476 * if CPU A and CPU B are the same CPU (but again only if the system has 2477 * more than one CPU). 2478 */ 2479 void call_rcu(struct rcu_head *head, rcu_callback_t func) 2480 { 2481 __call_rcu(head, func, -1, 0); 2482 } 2483 EXPORT_SYMBOL_GPL(call_rcu); 2484 2485 /* 2486 * Queue an RCU callback for lazy invocation after a grace period. 
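 * The user-visible interface is kfree_rcu(): given a structure that embeds
 * a struct rcu_head, kfree_rcu(p, field) encodes the offset of that field
 * as the "callback", and callback invocation recognizes the encoded offset
 * (see the __is_kfree_rcu_offset() checks) and simply kfree()s the
 * enclosing object. For illustration only, with a hypothetical structure:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(p, rcu);	// p is a struct foo * unreachable by new readers
 *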
2487 * This will likely be later named something like "call_rcu_lazy()", 2488 * but this change will require some way of tagging the lazy RCU 2489 * callbacks in the list of pending callbacks. Until then, this 2490 * function may only be called from __kfree_rcu(). 2491 */ 2492 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) 2493 { 2494 __call_rcu(head, func, -1, 1); 2495 } 2496 EXPORT_SYMBOL_GPL(kfree_call_rcu); 2497 2498 /* 2499 * During early boot, any blocking grace-period wait automatically 2500 * implies a grace period. Later on, this is never the case for PREEMPT. 2501 * 2502 * However, because a context switch is a grace period for !PREEMPT, any 2503 * blocking grace-period wait automatically implies a grace period if 2504 * there is only one CPU online at any point in time during the execution of 2505 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to 2506 * occasionally incorrectly indicate that there are multiple CPUs online 2507 * when there was in fact only one the whole time, as this just adds some 2508 * overhead: RCU still operates correctly. 2509 */ 2510 static int rcu_blocking_is_gp(void) 2511 { 2512 int ret; 2513 2514 if (IS_ENABLED(CONFIG_PREEMPT)) 2515 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE; 2516 might_sleep(); /* Check for RCU read-side critical section. */ 2517 preempt_disable(); 2518 ret = num_online_cpus() <= 1; 2519 preempt_enable(); 2520 return ret; 2521 } 2522 2523 /** 2524 * synchronize_rcu - wait until a grace period has elapsed. 2525 * 2526 * Control will return to the caller some time after a full grace 2527 * period has elapsed, in other words after all currently executing RCU 2528 * read-side critical sections have completed. Note, however, that 2529 * upon return from synchronize_rcu(), the caller might well be executing 2530 * concurrently with new RCU read-side critical sections that began while 2531 * synchronize_rcu() was waiting. RCU read-side critical sections are 2532 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. 2533 * In addition, regions of code across which interrupts, preemption, or 2534 * softirqs have been disabled also serve as RCU read-side critical 2535 * sections. This includes hardware interrupt handlers, softirq handlers, 2536 * and NMI handlers. 2537 * 2538 * Note that this guarantee implies further memory-ordering guarantees. 2539 * On systems with more than one CPU, when synchronize_rcu() returns, 2540 * each CPU is guaranteed to have executed a full memory barrier since 2541 * the end of its last RCU read-side critical section whose beginning 2542 * preceded the call to synchronize_rcu(). In addition, each CPU having 2543 * an RCU read-side critical section that extends beyond the return from 2544 * synchronize_rcu() is guaranteed to have executed a full memory barrier 2545 * after the beginning of synchronize_rcu() and before the beginning of 2546 * that RCU read-side critical section. Note that these guarantees include 2547 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 2548 * that are executing in the kernel. 2549 * 2550 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 2551 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 2552 * to have executed a full memory barrier during the execution of 2553 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 2554 * again only if the system has more than one CPU).
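 *
 * For illustration only, a minimal writer-side usage sketch; the pointer
 * gp, its replacement new_p, and my_lock are hypothetical:
 *
 *	spin_lock(&my_lock);
 *	old_p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new_p);
 *	spin_unlock(&my_lock);
 *	synchronize_rcu();	// Wait for pre-existing readers to finish.
 *	kfree(old_p);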
2555 */ 2556 void synchronize_rcu(void) 2557 { 2558 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 2559 lock_is_held(&rcu_lock_map) || 2560 lock_is_held(&rcu_sched_lock_map), 2561 "Illegal synchronize_rcu() in RCU read-side critical section"); 2562 if (rcu_blocking_is_gp()) 2563 return; 2564 if (rcu_gp_is_expedited()) 2565 synchronize_rcu_expedited(); 2566 else 2567 wait_rcu_gp(call_rcu); 2568 } 2569 EXPORT_SYMBOL_GPL(synchronize_rcu); 2570 2571 /** 2572 * get_state_synchronize_rcu - Snapshot current RCU state 2573 * 2574 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 2575 * to determine whether or not a full grace period has elapsed in the 2576 * meantime. 2577 */ 2578 unsigned long get_state_synchronize_rcu(void) 2579 { 2580 /* 2581 * Any prior manipulation of RCU-protected data must happen 2582 * before the load from ->gp_seq. 2583 */ 2584 smp_mb(); /* ^^^ */ 2585 return rcu_seq_snap(&rcu_state.gp_seq); 2586 } 2587 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 2588 2589 /** 2590 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 2591 * 2592 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 2593 * 2594 * If a full RCU grace period has elapsed since the earlier call to 2595 * get_state_synchronize_rcu(), just return. Otherwise, invoke 2596 * synchronize_rcu() to wait for a full grace period. 2597 * 2598 * Yes, this function does not take counter wrap into account. But 2599 * counter wrap is harmless. If the counter wraps, we have waited for 2600 * more than 2 billion grace periods (and way more on a 64-bit system!), 2601 * so waiting for one additional grace period should be just fine. 2602 */ 2603 void cond_synchronize_rcu(unsigned long oldstate) 2604 { 2605 if (!rcu_seq_done(&rcu_state.gp_seq, oldstate)) 2606 synchronize_rcu(); 2607 else 2608 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 2609 } 2610 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 2611 2612 /* 2613 * Check to see if there is any immediate RCU-related work to be done by 2614 * the current CPU, returning 1 if so and zero otherwise. The checks are 2615 * in order of increasing expense: checks that can be carried out against 2616 * CPU-local state are performed first. However, we must check for CPU 2617 * stalls first, else we might not get a chance. 2618 */ 2619 static int rcu_pending(void) 2620 { 2621 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 2622 struct rcu_node *rnp = rdp->mynode; 2623 2624 /* Check for CPU stalls, if enabled. */ 2625 check_cpu_stall(rdp); 2626 2627 /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ 2628 if (rcu_nohz_full_cpu()) 2629 return 0; 2630 2631 /* Is the RCU core waiting for a quiescent state from this CPU? */ 2632 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) 2633 return 1; 2634 2635 /* Does this CPU have callbacks ready to invoke? */ 2636 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2637 return 1; 2638 2639 /* Has RCU gone idle with this CPU needing another grace period? */ 2640 if (!rcu_gp_in_progress() && 2641 rcu_segcblist_is_enabled(&rdp->cblist) && 2642 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2643 return 1; 2644 2645 /* Have RCU grace period completed or started? */ 2646 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 2647 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 2648 return 1; 2649 2650 /* Does this CPU need a deferred NOCB wakeup? 
*/ 2651 if (rcu_nocb_need_deferred_wakeup(rdp)) 2652 return 1; 2653 2654 /* nothing to do */ 2655 return 0; 2656 } 2657 2658 /* 2659 * Helper function for rcu_barrier() tracing. If tracing is disabled, 2660 * the compiler is expected to optimize this away. 2661 */ 2662 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 2663 { 2664 trace_rcu_barrier(rcu_state.name, s, cpu, 2665 atomic_read(&rcu_state.barrier_cpu_count), done); 2666 } 2667 2668 /* 2669 * RCU callback function for rcu_barrier(). If we are last, wake 2670 * up the task executing rcu_barrier(). 2671 */ 2672 static void rcu_barrier_callback(struct rcu_head *rhp) 2673 { 2674 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 2675 rcu_barrier_trace(TPS("LastCB"), -1, 2676 rcu_state.barrier_sequence); 2677 complete(&rcu_state.barrier_completion); 2678 } else { 2679 rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence); 2680 } 2681 } 2682 2683 /* 2684 * Called with preemption disabled, and from cross-cpu IRQ context. 2685 */ 2686 static void rcu_barrier_func(void *unused) 2687 { 2688 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2689 2690 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 2691 rdp->barrier_head.func = rcu_barrier_callback; 2692 debug_rcu_head_queue(&rdp->barrier_head); 2693 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { 2694 atomic_inc(&rcu_state.barrier_cpu_count); 2695 } else { 2696 debug_rcu_head_unqueue(&rdp->barrier_head); 2697 rcu_barrier_trace(TPS("IRQNQ"), -1, 2698 rcu_state.barrier_sequence); 2699 } 2700 } 2701 2702 /** 2703 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 2704 * 2705 * Note that this primitive does not necessarily wait for an RCU grace period 2706 * to complete. For example, if there are no RCU callbacks queued anywhere 2707 * in the system, then rcu_barrier() is within its rights to return 2708 * immediately, without waiting for anything, much less an RCU grace period. 2709 */ 2710 void rcu_barrier(void) 2711 { 2712 int cpu; 2713 struct rcu_data *rdp; 2714 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 2715 2716 rcu_barrier_trace(TPS("Begin"), -1, s); 2717 2718 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 2719 mutex_lock(&rcu_state.barrier_mutex); 2720 2721 /* Did someone else do our work for us? */ 2722 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 2723 rcu_barrier_trace(TPS("EarlyExit"), -1, 2724 rcu_state.barrier_sequence); 2725 smp_mb(); /* caller's subsequent code after above check. */ 2726 mutex_unlock(&rcu_state.barrier_mutex); 2727 return; 2728 } 2729 2730 /* Mark the start of the barrier operation. */ 2731 rcu_seq_start(&rcu_state.barrier_sequence); 2732 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 2733 2734 /* 2735 * Initialize the count to one rather than to zero in order to 2736 * avoid a too-soon return to zero in case of a short grace period 2737 * (or preemption of this task). Exclude CPU-hotplug operations 2738 * to ensure that no offline CPU has callbacks queued. 2739 */ 2740 init_completion(&rcu_state.barrier_completion); 2741 atomic_set(&rcu_state.barrier_cpu_count, 1); 2742 get_online_cpus(); 2743 2744 /* 2745 * Force each CPU with callbacks to register a new callback. 2746 * When that callback is invoked, we will know that all of the 2747 * corresponding CPU's preceding callbacks have been invoked. 
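 * The loop below distinguishes three cases: no-CBs CPUs that still have
 * callbacks pending get the barrier callback queued directly via
 * __call_rcu(), CPUs with ordinary queued callbacks get an IPI to
 * rcu_barrier_func() so that the barrier callback is entrained behind them,
 * and CPUs with no callbacks at all are skipped.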
2748 */ 2749 for_each_possible_cpu(cpu) { 2750 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu)) 2751 continue; 2752 rdp = per_cpu_ptr(&rcu_data, cpu); 2753 if (rcu_is_nocb_cpu(cpu)) { 2754 if (!rcu_nocb_cpu_needs_barrier(cpu)) { 2755 rcu_barrier_trace(TPS("OfflineNoCB"), cpu, 2756 rcu_state.barrier_sequence); 2757 } else { 2758 rcu_barrier_trace(TPS("OnlineNoCB"), cpu, 2759 rcu_state.barrier_sequence); 2760 smp_mb__before_atomic(); 2761 atomic_inc(&rcu_state.barrier_cpu_count); 2762 __call_rcu(&rdp->barrier_head, 2763 rcu_barrier_callback, cpu, 0); 2764 } 2765 } else if (rcu_segcblist_n_cbs(&rdp->cblist)) { 2766 rcu_barrier_trace(TPS("OnlineQ"), cpu, 2767 rcu_state.barrier_sequence); 2768 smp_call_function_single(cpu, rcu_barrier_func, NULL, 1); 2769 } else { 2770 rcu_barrier_trace(TPS("OnlineNQ"), cpu, 2771 rcu_state.barrier_sequence); 2772 } 2773 } 2774 put_online_cpus(); 2775 2776 /* 2777 * Now that we have an rcu_barrier_callback() callback on each 2778 * CPU, and thus each counted, remove the initial count. 2779 */ 2780 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) 2781 complete(&rcu_state.barrier_completion); 2782 2783 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 2784 wait_for_completion(&rcu_state.barrier_completion); 2785 2786 /* Mark the end of the barrier operation. */ 2787 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 2788 rcu_seq_end(&rcu_state.barrier_sequence); 2789 2790 /* Other rcu_barrier() invocations can now safely proceed. */ 2791 mutex_unlock(&rcu_state.barrier_mutex); 2792 } 2793 EXPORT_SYMBOL_GPL(rcu_barrier); 2794 2795 /* 2796 * Propagate ->qsinitmask bits up the rcu_node tree to account for the 2797 * first CPU in a given leaf rcu_node structure coming online. The caller 2798 * must hold the corresponding leaf rcu_node ->lock with interrrupts 2799 * disabled. 2800 */ 2801 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) 2802 { 2803 long mask; 2804 long oldmask; 2805 struct rcu_node *rnp = rnp_leaf; 2806 2807 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2808 WARN_ON_ONCE(rnp->wait_blkd_tasks); 2809 for (;;) { 2810 mask = rnp->grpmask; 2811 rnp = rnp->parent; 2812 if (rnp == NULL) 2813 return; 2814 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ 2815 oldmask = rnp->qsmaskinit; 2816 rnp->qsmaskinit |= mask; 2817 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ 2818 if (oldmask) 2819 return; 2820 } 2821 } 2822 2823 /* 2824 * Do boot-time initialization of a CPU's per-CPU RCU data. 2825 */ 2826 static void __init 2827 rcu_boot_init_percpu_data(int cpu) 2828 { 2829 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2830 2831 /* Set up local state, ensuring consistent view of global state. */ 2832 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); 2833 WARN_ON_ONCE(rdp->dynticks_nesting != 1); 2834 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); 2835 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; 2836 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; 2837 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; 2838 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; 2839 rdp->cpu = cpu; 2840 rcu_boot_init_nocb_percpu_data(rdp); 2841 } 2842 2843 /* 2844 * Invoked early in the CPU-online process, when pretty much all services 2845 * are available. The incoming CPU is not present. 2846 * 2847 * Initializes a CPU's per-CPU RCU data. Note that only one online or 2848 * offline event can be happening at a given time. 
Note also that we can 2849 * accept some slop in the rsp->gp_seq access due to the fact that this 2850 * CPU cannot possibly have any RCU callbacks in flight yet. 2851 */ 2852 int rcutree_prepare_cpu(unsigned int cpu) 2853 { 2854 unsigned long flags; 2855 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2856 struct rcu_node *rnp = rcu_get_root(); 2857 2858 /* Set up local state, ensuring consistent view of global state. */ 2859 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2860 rdp->qlen_last_fqs_check = 0; 2861 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2862 rdp->blimit = blimit; 2863 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ 2864 !init_nocb_callback_list(rdp)) 2865 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 2866 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ 2867 rcu_dynticks_eqs_online(); 2868 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2869 2870 /* 2871 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 2872 * propagation up the rcu_node tree will happen at the beginning 2873 * of the next grace period. 2874 */ 2875 rnp = rdp->mynode; 2876 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2877 rdp->beenonline = true; /* We have now been online. */ 2878 rdp->gp_seq = rnp->gp_seq; 2879 rdp->gp_seq_needed = rnp->gp_seq; 2880 rdp->cpu_no_qs.b.norm = true; 2881 rdp->core_needs_qs = false; 2882 rdp->rcu_iw_pending = false; 2883 rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; 2884 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 2885 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2886 rcu_prepare_kthreads(cpu); 2887 rcu_spawn_cpu_nocb_kthread(cpu); 2888 2889 return 0; 2890 } 2891 2892 /* 2893 * Update RCU priority boot kthread affinity for CPU-hotplug changes. 2894 */ 2895 static void rcutree_affinity_setting(unsigned int cpu, int outgoing) 2896 { 2897 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2898 2899 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); 2900 } 2901 2902 /* 2903 * Near the end of the CPU-online process. Pretty much all services 2904 * enabled, and the CPU is now very much alive. 2905 */ 2906 int rcutree_online_cpu(unsigned int cpu) 2907 { 2908 unsigned long flags; 2909 struct rcu_data *rdp; 2910 struct rcu_node *rnp; 2911 2912 rdp = per_cpu_ptr(&rcu_data, cpu); 2913 rnp = rdp->mynode; 2914 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2915 rnp->ffmask |= rdp->grpmask; 2916 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2917 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 2918 return 0; /* Too early in boot for scheduler work. */ 2919 sync_sched_exp_online_cleanup(cpu); 2920 rcutree_affinity_setting(cpu, -1); 2921 return 0; 2922 } 2923 2924 /* 2925 * Near the beginning of the process. The CPU is still very much alive 2926 * with pretty much all services enabled. 2927 */ 2928 int rcutree_offline_cpu(unsigned int cpu) 2929 { 2930 unsigned long flags; 2931 struct rcu_data *rdp; 2932 struct rcu_node *rnp; 2933 2934 rdp = per_cpu_ptr(&rcu_data, cpu); 2935 rnp = rdp->mynode; 2936 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2937 rnp->ffmask &= ~rdp->grpmask; 2938 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2939 2940 rcutree_affinity_setting(cpu, cpu); 2941 return 0; 2942 } 2943 2944 static DEFINE_PER_CPU(int, rcu_cpu_started); 2945 2946 /* 2947 * Mark the specified CPU as being online so that subsequent grace periods 2948 * (both expedited and normal) will wait on it. 
Note that this means that 2949 * incoming CPUs are not allowed to use RCU read-side critical sections 2950 * until this function is called. Failing to observe this restriction 2951 * will result in lockdep splats. 2952 * 2953 * Note that this function is special in that it is invoked directly 2954 * from the incoming CPU rather than from the cpuhp_step mechanism. 2955 * This is because this function must be invoked at a precise location. 2956 */ 2957 void rcu_cpu_starting(unsigned int cpu) 2958 { 2959 unsigned long flags; 2960 unsigned long mask; 2961 int nbits; 2962 unsigned long oldmask; 2963 struct rcu_data *rdp; 2964 struct rcu_node *rnp; 2965 2966 if (per_cpu(rcu_cpu_started, cpu)) 2967 return; 2968 2969 per_cpu(rcu_cpu_started, cpu) = 1; 2970 2971 rdp = per_cpu_ptr(&rcu_data, cpu); 2972 rnp = rdp->mynode; 2973 mask = rdp->grpmask; 2974 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2975 rnp->qsmaskinitnext |= mask; 2976 oldmask = rnp->expmaskinitnext; 2977 rnp->expmaskinitnext |= mask; 2978 oldmask ^= rnp->expmaskinitnext; 2979 nbits = bitmap_weight(&oldmask, BITS_PER_LONG); 2980 /* Allow lockless access for expedited grace periods. */ 2981 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */ 2982 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ 2983 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); 2984 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); 2985 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ 2986 /* Report QS -after- changing ->qsmaskinitnext! */ 2987 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2988 } else { 2989 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2990 } 2991 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 2992 } 2993 2994 #ifdef CONFIG_HOTPLUG_CPU 2995 /* 2996 * The outgoing CPU has no further need of RCU, so remove it from 2997 * the rcu_node tree's ->qsmaskinitnext bit masks. 2998 * 2999 * Note that this function is special in that it is invoked directly 3000 * from the outgoing CPU rather than from the cpuhp_step mechanism. 3001 * This is because this function must be invoked at a precise location. 3002 */ 3003 void rcu_report_dead(unsigned int cpu) 3004 { 3005 unsigned long flags; 3006 unsigned long mask; 3007 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3008 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 3009 3010 /* QS for any half-done expedited grace period. */ 3011 preempt_disable(); 3012 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); 3013 preempt_enable(); 3014 rcu_preempt_deferred_qs(current); 3015 3016 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 3017 mask = rdp->grpmask; 3018 raw_spin_lock(&rcu_state.ofl_lock); 3019 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 3020 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 3021 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); 3022 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 3023 /* Report quiescent state -before- changing ->qsmaskinitnext!
*/ 3024 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 3025 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3026 } 3027 rnp->qsmaskinitnext &= ~mask; 3028 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3029 raw_spin_unlock(&rcu_state.ofl_lock); 3030 3031 per_cpu(rcu_cpu_started, cpu) = 0; 3032 } 3033 3034 /* 3035 * The outgoing CPU has just passed through the dying-idle state, and we 3036 * are being invoked from the CPU that was IPIed to continue the offline 3037 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 3038 */ 3039 void rcutree_migrate_callbacks(int cpu) 3040 { 3041 unsigned long flags; 3042 struct rcu_data *my_rdp; 3043 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3044 struct rcu_node *rnp_root = rcu_get_root(); 3045 bool needwake; 3046 3047 if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist)) 3048 return; /* No callbacks to migrate. */ 3049 3050 local_irq_save(flags); 3051 my_rdp = this_cpu_ptr(&rcu_data); 3052 if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) { 3053 local_irq_restore(flags); 3054 return; 3055 } 3056 raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */ 3057 /* Leverage recent GPs and set GP for new callbacks. */ 3058 needwake = rcu_advance_cbs(rnp_root, rdp) || 3059 rcu_advance_cbs(rnp_root, my_rdp); 3060 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 3061 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != 3062 !rcu_segcblist_n_cbs(&my_rdp->cblist)); 3063 raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags); 3064 if (needwake) 3065 rcu_gp_kthread_wake(); 3066 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 3067 !rcu_segcblist_empty(&rdp->cblist), 3068 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 3069 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 3070 rcu_segcblist_first_cb(&rdp->cblist)); 3071 } 3072 #endif 3073 3074 /* 3075 * On non-huge systems, use expedited RCU grace periods to make suspend 3076 * and hibernation run faster. 3077 */ 3078 static int rcu_pm_notify(struct notifier_block *self, 3079 unsigned long action, void *hcpu) 3080 { 3081 switch (action) { 3082 case PM_HIBERNATION_PREPARE: 3083 case PM_SUSPEND_PREPARE: 3084 rcu_expedite_gp(); 3085 break; 3086 case PM_POST_HIBERNATION: 3087 case PM_POST_SUSPEND: 3088 rcu_unexpedite_gp(); 3089 break; 3090 default: 3091 break; 3092 } 3093 return NOTIFY_OK; 3094 } 3095 3096 /* 3097 * Spawn the kthreads that handle RCU's grace periods. 3098 */ 3099 static int __init rcu_spawn_gp_kthread(void) 3100 { 3101 unsigned long flags; 3102 int kthread_prio_in = kthread_prio; 3103 struct rcu_node *rnp; 3104 struct sched_param sp; 3105 struct task_struct *t; 3106 3107 /* Force priority into range. 
*/ 3108 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2 3109 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) 3110 kthread_prio = 2; 3111 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 3112 kthread_prio = 1; 3113 else if (kthread_prio < 0) 3114 kthread_prio = 0; 3115 else if (kthread_prio > 99) 3116 kthread_prio = 99; 3117 3118 if (kthread_prio != kthread_prio_in) 3119 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", 3120 kthread_prio, kthread_prio_in); 3121 3122 rcu_scheduler_fully_active = 1; 3123 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 3124 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 3125 return 0; 3126 rnp = rcu_get_root(); 3127 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3128 rcu_state.gp_kthread = t; 3129 if (kthread_prio) { 3130 sp.sched_priority = kthread_prio; 3131 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 3132 } 3133 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3134 wake_up_process(t); 3135 rcu_spawn_nocb_kthreads(); 3136 rcu_spawn_boost_kthreads(); 3137 return 0; 3138 } 3139 early_initcall(rcu_spawn_gp_kthread); 3140 3141 /* 3142 * This function is invoked towards the end of the scheduler's 3143 * initialization process. Before this is called, the idle task might 3144 * contain synchronous grace-period primitives (during which time, this idle 3145 * task is booting the system, and such primitives are no-ops). After this 3146 * function is called, any synchronous grace-period primitives are run as 3147 * expedited, with the requesting task driving the grace period forward. 3148 * A later core_initcall() rcu_set_runtime_mode() will switch to full 3149 * runtime RCU functionality. 3150 */ 3151 void rcu_scheduler_starting(void) 3152 { 3153 WARN_ON(num_online_cpus() != 1); 3154 WARN_ON(nr_context_switches() > 0); 3155 rcu_test_sync_prims(); 3156 rcu_scheduler_active = RCU_SCHEDULER_INIT; 3157 rcu_test_sync_prims(); 3158 } 3159 3160 /* 3161 * Helper function for rcu_init() that initializes the rcu_state structure. 3162 */ 3163 static void __init rcu_init_one(void) 3164 { 3165 static const char * const buf[] = RCU_NODE_NAME_INIT; 3166 static const char * const fqs[] = RCU_FQS_NAME_INIT; 3167 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 3168 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 3169 3170 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 3171 int cpustride = 1; 3172 int i; 3173 int j; 3174 struct rcu_node *rnp; 3175 3176 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 3177 3178 /* Silence gcc 4.8 false positive about array index out of range. */ 3179 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 3180 panic("rcu_init_one: rcu_num_lvls out of range"); 3181 3182 /* Initialize the level-tracking arrays. */ 3183 3184 for (i = 1; i < rcu_num_lvls; i++) 3185 rcu_state.level[i] = 3186 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; 3187 rcu_init_levelspread(levelspread, num_rcu_lvl); 3188 3189 /* Initialize the elements themselves, starting from the leaves. 
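 * The loop below runs from the leaf level up toward the root: each
 * rcu_node structure is given its lock and lockdep class, its span of CPUs
 * (->grplo/->grphi, cpustride apart), its bit and index within its parent
 * (->grpmask, ->grpnum), and its ->parent pointer.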
        /* Initialize the elements themselves, starting from the leaves. */

        for (i = rcu_num_lvls - 1; i >= 0; i--) {
                cpustride *= levelspread[i];
                rnp = rcu_state.level[i];
                for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
                        raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
                        lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
                                                   &rcu_node_class[i], buf[i]);
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,
                                                   &rcu_fqs_class[i], fqs[i]);
                        rnp->gp_seq = rcu_state.gp_seq;
                        rnp->gp_seq_needed = rcu_state.gp_seq;
                        rnp->completedqs = rcu_state.gp_seq;
                        rnp->qsmask = 0;
                        rnp->qsmaskinit = 0;
                        rnp->grplo = j * cpustride;
                        rnp->grphi = (j + 1) * cpustride - 1;
                        if (rnp->grphi >= nr_cpu_ids)
                                rnp->grphi = nr_cpu_ids - 1;
                        if (i == 0) {
                                rnp->grpnum = 0;
                                rnp->grpmask = 0;
                                rnp->parent = NULL;
                        } else {
                                rnp->grpnum = j % levelspread[i - 1];
                                rnp->grpmask = BIT(rnp->grpnum);
                                rnp->parent = rcu_state.level[i - 1] +
                                              j / levelspread[i - 1];
                        }
                        rnp->level = i;
                        INIT_LIST_HEAD(&rnp->blkd_tasks);
                        rcu_init_one_nocb(rnp);
                        init_waitqueue_head(&rnp->exp_wq[0]);
                        init_waitqueue_head(&rnp->exp_wq[1]);
                        init_waitqueue_head(&rnp->exp_wq[2]);
                        init_waitqueue_head(&rnp->exp_wq[3]);
                        spin_lock_init(&rnp->exp_lock);
                }
        }

        init_swait_queue_head(&rcu_state.gp_wq);
        init_swait_queue_head(&rcu_state.expedited_wq);
        rnp = rcu_first_leaf_node();
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
                        rnp++;
                per_cpu_ptr(&rcu_data, i)->mynode = rnp;
                rcu_boot_init_percpu_data(i);
        }
}

/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size the
 * ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
        ulong d;
        int i;
        int rcu_capacity[RCU_NUM_LVLS];

        /*
         * Initialize any unspecified boot parameters.
         * The default values of jiffies_till_first_fqs and
         * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
         * value (which is a function of HZ), plus one for each
         * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
         */
        d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
        if (jiffies_till_first_fqs == ULONG_MAX)
                jiffies_till_first_fqs = d;
        if (jiffies_till_next_fqs == ULONG_MAX)
                jiffies_till_next_fqs = d;
        adjust_jiffies_till_sched_qs();

        /* If the compile-time values are accurate, just leave. */
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
        pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
                rcu_fanout_leaf, nr_cpu_ids);

        /*
         * The boot-time rcu_fanout_leaf parameter must be at least two
         * and cannot exceed the number of bits in the rcu_node masks.
         * Complain and fall back to the compile-time values if this
         * limit is exceeded.
         */
        if (rcu_fanout_leaf < 2 ||
            rcu_fanout_leaf > sizeof(unsigned long) * 8) {
                rcu_fanout_leaf = RCU_FANOUT_LEAF;
                WARN_ON(1);
                return;
        }

        /*
         * Compute the number of rcu_node structures that can be handled by
         * an rcu_node tree with the given number of levels.
         */
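        /*
         * Illustration: with rcu_fanout_leaf = 16 and RCU_FANOUT = 64,
         * rcu_capacity[] starts out as {16, 1024, 65536, ...}, so a system
         * with nr_cpu_ids = 80 ends up with a two-level tree once the loops
         * below have run: num_rcu_lvl[] = {1, 5} and rcu_num_nodes = 6.
         */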
        rcu_capacity[0] = rcu_fanout_leaf;
        for (i = 1; i < RCU_NUM_LVLS; i++)
                rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

        /*
         * The tree must be able to accommodate the configured number of CPUs.
         * If this limit is exceeded, fall back to the compile-time values.
         */
        if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
                rcu_fanout_leaf = RCU_FANOUT_LEAF;
                WARN_ON(1);
                return;
        }

        /* Calculate the number of levels in the tree. */
        for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
        }
        rcu_num_lvls = i + 1;

        /* Calculate the number of rcu_nodes at each level of the tree. */
        for (i = 0; i < rcu_num_lvls; i++) {
                int cap = rcu_capacity[(rcu_num_lvls - 1) - i];

                num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
        }

        /* Calculate the total number of rcu_node structures. */
        rcu_num_nodes = 0;
        for (i = 0; i < rcu_num_lvls; i++)
                rcu_num_nodes += num_rcu_lvl[i];
}

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure.
 */
static void __init rcu_dump_rcu_node_tree(void)
{
        int level = 0;
        struct rcu_node *rnp;

        pr_info("rcu_node tree layout dump\n");
        pr_info(" ");
        rcu_for_each_node_breadth_first(rnp) {
                if (rnp->level != level) {
                        pr_cont("\n");
                        pr_info(" ");
                        level = rnp->level;
                }
                pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
        }
        pr_cont("\n");
}

struct workqueue_struct *rcu_gp_wq;
struct workqueue_struct *rcu_par_gp_wq;

void __init rcu_init(void)
{
        int cpu;

        rcu_early_boot_tests();

        rcu_bootup_announce();
        rcu_init_geometry();
        rcu_init_one();
        if (dump_tree)
                rcu_dump_rcu_node_tree();
        open_softirq(RCU_SOFTIRQ, rcu_core);

        /*
         * We don't need protection against CPU-hotplug here because
         * this is called early in boot, before either interrupts
         * or the scheduler are operational.
         */
        pm_notifier(rcu_pm_notify, 0);
        for_each_online_cpu(cpu) {
                rcutree_prepare_cpu(cpu);
                rcu_cpu_starting(cpu);
                rcutree_online_cpu(cpu);
        }

        /* Create workqueues for expedited GPs and for Tree SRCU. */
        rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
        WARN_ON(!rcu_gp_wq);
        rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
        WARN_ON(!rcu_par_gp_wq);
        srcu_init();
}

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"