1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update mechanism for mutual exclusion 4 * 5 * Copyright IBM Corporation, 2008 6 * 7 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 8 * Manfred Spraul <manfred@colorfullife.com> 9 * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical version 10 * 11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> 12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 13 * 14 * For detailed explanation of Read-Copy Update mechanism see - 15 * Documentation/RCU 16 */ 17 18 #define pr_fmt(fmt) "rcu: " fmt 19 20 #include <linux/types.h> 21 #include <linux/kernel.h> 22 #include <linux/init.h> 23 #include <linux/spinlock.h> 24 #include <linux/smp.h> 25 #include <linux/rcupdate_wait.h> 26 #include <linux/interrupt.h> 27 #include <linux/sched.h> 28 #include <linux/sched/debug.h> 29 #include <linux/nmi.h> 30 #include <linux/atomic.h> 31 #include <linux/bitops.h> 32 #include <linux/export.h> 33 #include <linux/completion.h> 34 #include <linux/moduleparam.h> 35 #include <linux/percpu.h> 36 #include <linux/notifier.h> 37 #include <linux/cpu.h> 38 #include <linux/mutex.h> 39 #include <linux/time.h> 40 #include <linux/kernel_stat.h> 41 #include <linux/wait.h> 42 #include <linux/kthread.h> 43 #include <uapi/linux/sched/types.h> 44 #include <linux/prefetch.h> 45 #include <linux/delay.h> 46 #include <linux/stop_machine.h> 47 #include <linux/random.h> 48 #include <linux/trace_events.h> 49 #include <linux/suspend.h> 50 #include <linux/ftrace.h> 51 #include <linux/tick.h> 52 #include <linux/sysrq.h> 53 #include <linux/kprobes.h> 54 #include <linux/gfp.h> 55 #include <linux/oom.h> 56 #include <linux/smpboot.h> 57 #include <linux/jiffies.h> 58 #include <linux/sched/isolation.h> 59 #include <linux/sched/clock.h> 60 #include "../time/tick-internal.h" 61 62 #include "tree.h" 63 #include "rcu.h" 64 65 #ifdef MODULE_PARAM_PREFIX 66 #undef MODULE_PARAM_PREFIX 67 #endif 68 #define MODULE_PARAM_PREFIX "rcutree." 69 70 /* Data structures. */ 71 72 /* 73 * Steal a bit from the bottom of ->dynticks for idle entry/exit 74 * control. Initially this is for TLB flushing. 75 */ 76 #define RCU_DYNTICK_CTRL_MASK 0x1 77 #define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1) 78 #ifndef rcu_eqs_special_exit 79 #define rcu_eqs_special_exit() do { } while (0) 80 #endif 81 82 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { 83 .dynticks_nesting = 1, 84 .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, 85 .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), 86 }; 87 struct rcu_state rcu_state = { 88 .level = { &rcu_state.node[0] }, 89 .gp_state = RCU_GP_IDLE, 90 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, 91 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex), 92 .name = RCU_NAME, 93 .abbr = RCU_ABBR, 94 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex), 95 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex), 96 .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock), 97 }; 98 99 /* Dump rcu_node combining tree at boot to verify correct setup. */ 100 static bool dump_tree; 101 module_param(dump_tree, bool, 0444); 102 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */ 103 static bool use_softirq = 1; 104 module_param(use_softirq, bool, 0444); 105 /* Control rcu_node-tree auto-balancing at boot time. */ 106 static bool rcu_fanout_exact; 107 module_param(rcu_fanout_exact, bool, 0444); 108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. 
*/ 109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF; 110 module_param(rcu_fanout_leaf, int, 0444); 111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; 112 /* Number of rcu_nodes at specified level. */ 113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT; 114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ 115 116 /* 117 * The rcu_scheduler_active variable is initialized to the value 118 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the 119 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE, 120 * RCU can assume that there is but one task, allowing RCU to (for example) 121 * optimize synchronize_rcu() to a simple barrier(). When this variable 122 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required 123 * to detect real grace periods. This variable is also used to suppress 124 * boot-time false positives from lockdep-RCU error checking. Finally, it 125 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU 126 * is fully initialized, including all of its kthreads having been spawned. 127 */ 128 int rcu_scheduler_active __read_mostly; 129 EXPORT_SYMBOL_GPL(rcu_scheduler_active); 130 131 /* 132 * The rcu_scheduler_fully_active variable transitions from zero to one 133 * during the early_initcall() processing, which is after the scheduler 134 * is capable of creating new tasks. So RCU processing (for example, 135 * creating tasks for RCU priority boosting) must be delayed until after 136 * rcu_scheduler_fully_active transitions from zero to one. We also 137 * currently delay invocation of any RCU callbacks until after this point. 138 * 139 * It might later prove better for people registering RCU callbacks during 140 * early boot to take responsibility for these callbacks, but one step at 141 * a time. 142 */ 143 static int rcu_scheduler_fully_active __read_mostly; 144 145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 146 unsigned long gps, unsigned long flags); 147 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); 148 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); 149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 150 static void invoke_rcu_core(void); 151 static void rcu_report_exp_rdp(struct rcu_data *rdp); 152 static void sync_sched_exp_online_cleanup(int cpu); 153 154 /* rcuc/rcub kthread realtime priority */ 155 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0; 156 module_param(kthread_prio, int, 0444); 157 158 /* Delay in jiffies for grace-period initialization delays, debug only. */ 159 160 static int gp_preinit_delay; 161 module_param(gp_preinit_delay, int, 0444); 162 static int gp_init_delay; 163 module_param(gp_init_delay, int, 0444); 164 static int gp_cleanup_delay; 165 module_param(gp_cleanup_delay, int, 0444); 166 167 /* Retrieve RCU kthreads priority for rcutorture */ 168 int rcu_get_gp_kthreads_prio(void) 169 { 170 return kthread_prio; 171 } 172 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); 173 174 /* 175 * Number of grace periods between delays, normalized by the duration of 176 * the delay. The longer the delay, the more grace periods between 177 * each delay. The reason for this normalization is that it means that, 178 * for non-zero delays, the overall slowdown of grace periods is constant 179 * regardless of the duration of the delay.
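 *
 * For example (see rcu_gp_slow() later in this file), with
 * PER_RCU_NODE_PERIOD == 3 and a debug delay of d jiffies, the
 * grace-period kthread sleeps for d jiffies once per
 * rcu_num_nodes * 3 * d grace periods, so the added latency averages
 * out to roughly 1/(3 * rcu_num_nodes) jiffies per grace period no
 * matter what value of d is chosen.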
This arrangement balances 180 * the need for long delays to increase some race probabilities with the 181 * need for fast grace periods to increase other race probabilities. 182 */ 183 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ 184 185 /* 186 * Compute the mask of online CPUs for the specified rcu_node structure. 187 * This will not be stable unless the rcu_node structure's ->lock is 188 * held, but the bit corresponding to the current CPU will be stable 189 * in most contexts. 190 */ 191 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 192 { 193 return READ_ONCE(rnp->qsmaskinitnext); 194 } 195 196 /* 197 * Return true if an RCU grace period is in progress. The READ_ONCE()s 198 * permit this function to be invoked without holding the root rcu_node 199 * structure's ->lock, but of course results can be subject to change. 200 */ 201 static int rcu_gp_in_progress(void) 202 { 203 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); 204 } 205 206 /* 207 * Return the number of callbacks queued on the specified CPU. 208 * Handles both the nocbs and normal cases. 209 */ 210 static long rcu_get_n_cbs_cpu(int cpu) 211 { 212 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 213 214 if (rcu_segcblist_is_enabled(&rdp->cblist)) 215 return rcu_segcblist_n_cbs(&rdp->cblist); 216 return 0; 217 } 218 219 void rcu_softirq_qs(void) 220 { 221 rcu_qs(); 222 rcu_preempt_deferred_qs(current); 223 } 224 225 /* 226 * Record entry into an extended quiescent state. This is only to be 227 * called when not already in an extended quiescent state. 228 */ 229 static void rcu_dynticks_eqs_enter(void) 230 { 231 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 232 int seq; 233 234 /* 235 * CPUs seeing atomic_add_return() must see prior RCU read-side 236 * critical sections, and we also must force ordering with the 237 * next idle sojourn. 238 */ 239 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 240 /* Better be in an extended quiescent state! */ 241 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 242 (seq & RCU_DYNTICK_CTRL_CTR)); 243 /* Better not have special action (TLB flush) pending! */ 244 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 245 (seq & RCU_DYNTICK_CTRL_MASK)); 246 } 247 248 /* 249 * Record exit from an extended quiescent state. This is only to be 250 * called from an extended quiescent state. 251 */ 252 static void rcu_dynticks_eqs_exit(void) 253 { 254 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 255 int seq; 256 257 /* 258 * CPUs seeing atomic_add_return() must see prior idle sojourns, 259 * and we also must force ordering with the next RCU read-side 260 * critical section. 261 */ 262 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 263 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 264 !(seq & RCU_DYNTICK_CTRL_CTR)); 265 if (seq & RCU_DYNTICK_CTRL_MASK) { 266 atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); 267 smp_mb__after_atomic(); /* _exit after clearing mask. */ 268 /* Prefer duplicate flushes to losing a flush. */ 269 rcu_eqs_special_exit(); 270 } 271 } 272 273 /* 274 * Reset the current CPU's ->dynticks counter to indicate that the 275 * newly onlined CPU is no longer in an extended quiescent state. 276 * This will either leave the counter unchanged, or increment it 277 * to the next non-quiescent value. 278 * 279 * The non-atomic test/increment sequence works because the upper bits 280 * of the ->dynticks counter are manipulated only by the corresponding CPU, 281 * or when the corresponding CPU is offline. 
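 *
 * For example, with RCU_DYNTICK_CTRL_MASK == 0x1 and
 * RCU_DYNTICK_CTRL_CTR == 0x2, a ->dynticks value of 0x4 has the
 * RCU_DYNTICK_CTRL_CTR bit clear and thus denotes an extended quiescent
 * state, so this function adds RCU_DYNTICK_CTRL_CTR to yield 0x6 and
 * mark the newly onlined CPU as non-idle.  A value such as 0x6, with
 * that bit already set, is left unchanged.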
282 */ 283 static void rcu_dynticks_eqs_online(void) 284 { 285 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 286 287 if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR) 288 return; 289 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 290 } 291 292 /* 293 * Is the current CPU in an extended quiescent state? 294 * 295 * No ordering, as we are sampling CPU-local information. 296 */ 297 bool rcu_dynticks_curr_cpu_in_eqs(void) 298 { 299 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 300 301 return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); 302 } 303 304 /* 305 * Snapshot the ->dynticks counter with full ordering so as to allow 306 * stable comparison of this counter with past and future snapshots. 307 */ 308 int rcu_dynticks_snap(struct rcu_data *rdp) 309 { 310 int snap = atomic_add_return(0, &rdp->dynticks); 311 312 return snap & ~RCU_DYNTICK_CTRL_MASK; 313 } 314 315 /* 316 * Return true if the snapshot returned from rcu_dynticks_snap() 317 * indicates that RCU is in an extended quiescent state. 318 */ 319 static bool rcu_dynticks_in_eqs(int snap) 320 { 321 return !(snap & RCU_DYNTICK_CTRL_CTR); 322 } 323 324 /* 325 * Return true if the CPU corresponding to the specified rcu_data 326 * structure has spent some time in an extended quiescent state since 327 * rcu_dynticks_snap() returned the specified snapshot. 328 */ 329 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) 330 { 331 return snap != rcu_dynticks_snap(rdp); 332 } 333 334 /* 335 * Set the special (bottom) bit of the specified CPU so that it 336 * will take special action (such as flushing its TLB) on the 337 * next exit from an extended quiescent state. Returns true if 338 * the bit was successfully set, or false if the CPU was not in 339 * an extended quiescent state. 340 */ 341 bool rcu_eqs_special_set(int cpu) 342 { 343 int old; 344 int new; 345 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 346 347 do { 348 old = atomic_read(&rdp->dynticks); 349 if (old & RCU_DYNTICK_CTRL_CTR) 350 return false; 351 new = old | RCU_DYNTICK_CTRL_MASK; 352 } while (atomic_cmpxchg(&rdp->dynticks, old, new) != old); 353 return true; 354 } 355 356 /* 357 * Let the RCU core know that this CPU has gone through the scheduler, 358 * which is a quiescent state. This is called when the need for a 359 * quiescent state is urgent, so we burn an atomic operation and full 360 * memory barriers to let the RCU core know about it, regardless of what 361 * this CPU might (or might not) do in the near future. 362 * 363 * We inform the RCU core by emulating a zero-duration dyntick-idle period. 364 * 365 * The caller must have disabled interrupts and must not be idle. 366 */ 367 void rcu_momentary_dyntick_idle(void) 368 { 369 int special; 370 371 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); 372 special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, 373 &this_cpu_ptr(&rcu_data)->dynticks); 374 /* It is illegal to call this from idle state. */ 375 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); 376 rcu_preempt_deferred_qs(current); 377 } 378 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle); 379 380 /** 381 * rcu_is_cpu_rrupt_from_idle - see if interrupted from idle 382 * 383 * If the current CPU is idle and running at a first-level (not nested) 384 * interrupt from idle, return true. The caller must have at least 385 * disabled preemption. 
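 *
 * For example, a CPU in the idle loop has ->dynticks_nesting == 0, and
 * a scheduling-clock interrupt taken from idle raises
 * ->dynticks_nmi_nesting to 1, so this function returns true.  Had that
 * interrupt instead arrived while a task was running,
 * ->dynticks_nesting would be nonzero and ->dynticks_nmi_nesting would
 * have started from DYNTICK_IRQ_NONIDLE, so this function would
 * return false.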
386 */ 387 static int rcu_is_cpu_rrupt_from_idle(void) 388 { 389 /* Called only from within the scheduling-clock interrupt */ 390 lockdep_assert_in_irq(); 391 392 /* Check for counter underflows */ 393 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0, 394 "RCU dynticks_nesting counter underflow!"); 395 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0, 396 "RCU dynticks_nmi_nesting counter underflow/zero!"); 397 398 /* Are we at first interrupt nesting level? */ 399 if (__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 1) 400 return false; 401 402 /* Does CPU appear to be idle from an RCU standpoint? */ 403 return __this_cpu_read(rcu_data.dynticks_nesting) == 0; 404 } 405 406 #define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch ... */ 407 #define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */ 408 static long blimit = DEFAULT_RCU_BLIMIT; 409 #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */ 410 static long qhimark = DEFAULT_RCU_QHIMARK; 411 #define DEFAULT_RCU_QLOMARK 100 /* Once only this many pending, use blimit. */ 412 static long qlowmark = DEFAULT_RCU_QLOMARK; 413 414 module_param(blimit, long, 0444); 415 module_param(qhimark, long, 0444); 416 module_param(qlowmark, long, 0444); 417 418 static ulong jiffies_till_first_fqs = ULONG_MAX; 419 static ulong jiffies_till_next_fqs = ULONG_MAX; 420 static bool rcu_kick_kthreads; 421 static int rcu_divisor = 7; 422 module_param(rcu_divisor, int, 0644); 423 424 /* Force an exit from rcu_do_batch() after 3 milliseconds. */ 425 static long rcu_resched_ns = 3 * NSEC_PER_MSEC; 426 module_param(rcu_resched_ns, long, 0644); 427 428 /* 429 * How long the grace period must be before we start recruiting 430 * quiescent-state help from rcu_note_context_switch(). 431 */ 432 static ulong jiffies_till_sched_qs = ULONG_MAX; 433 module_param(jiffies_till_sched_qs, ulong, 0444); 434 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */ 435 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */ 436 437 /* 438 * Make sure that we give the grace-period kthread time to detect any 439 * idle CPUs before taking active measures to force quiescent states. 440 * However, don't go below 100 milliseconds, adjusted upwards for really 441 * large systems. 442 */ 443 static void adjust_jiffies_till_sched_qs(void) 444 { 445 unsigned long j; 446 447 /* If jiffies_till_sched_qs was specified, respect the request. */ 448 if (jiffies_till_sched_qs != ULONG_MAX) { 449 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); 450 return; 451 } 452 /* Otherwise, set to third fqs scan, but bound below on large system. */ 453 j = READ_ONCE(jiffies_till_first_fqs) + 454 2 * READ_ONCE(jiffies_till_next_fqs); 455 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) 456 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 457 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); 458 WRITE_ONCE(jiffies_to_sched_qs, j); 459 } 460 461 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp) 462 { 463 ulong j; 464 int ret = kstrtoul(val, 0, &j); 465 466 if (!ret) { 467 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); 468 adjust_jiffies_till_sched_qs(); 469 } 470 return ret; 471 } 472 473 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp) 474 { 475 ulong j; 476 int ret = kstrtoul(val, 0, &j); 477 478 if (!ret) { 479 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? 
HZ : (j ?: 1)); 480 adjust_jiffies_till_sched_qs(); 481 } 482 return ret; 483 } 484 485 static struct kernel_param_ops first_fqs_jiffies_ops = { 486 .set = param_set_first_fqs_jiffies, 487 .get = param_get_ulong, 488 }; 489 490 static struct kernel_param_ops next_fqs_jiffies_ops = { 491 .set = param_set_next_fqs_jiffies, 492 .get = param_get_ulong, 493 }; 494 495 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644); 496 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644); 497 module_param(rcu_kick_kthreads, bool, 0644); 498 499 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); 500 static int rcu_pending(int user); 501 502 /* 503 * Return the number of RCU GPs completed thus far for debug & stats. 504 */ 505 unsigned long rcu_get_gp_seq(void) 506 { 507 return READ_ONCE(rcu_state.gp_seq); 508 } 509 EXPORT_SYMBOL_GPL(rcu_get_gp_seq); 510 511 /* 512 * Return the number of RCU expedited batches completed thus far for 513 * debug & stats. Odd numbers mean that a batch is in progress, even 514 * numbers mean idle. The value returned will thus be roughly double 515 * the cumulative batches since boot. 516 */ 517 unsigned long rcu_exp_batches_completed(void) 518 { 519 return rcu_state.expedited_sequence; 520 } 521 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); 522 523 /* 524 * Return the root node of the rcu_state structure. 525 */ 526 static struct rcu_node *rcu_get_root(void) 527 { 528 return &rcu_state.node[0]; 529 } 530 531 /* 532 * Convert a ->gp_state value to a character string. 533 */ 534 static const char *gp_state_getname(short gs) 535 { 536 if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names)) 537 return "???"; 538 return gp_state_names[gs]; 539 } 540 541 /* 542 * Send along grace-period-related data for rcutorture diagnostics. 543 */ 544 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 545 unsigned long *gp_seq) 546 { 547 switch (test_type) { 548 case RCU_FLAVOR: 549 *flags = READ_ONCE(rcu_state.gp_flags); 550 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); 551 break; 552 default: 553 break; 554 } 555 } 556 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 557 558 /* 559 * Enter an RCU extended quiescent state, which can be either the 560 * idle loop or adaptive-tickless usermode execution. 561 * 562 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for 563 * the possibility of usermode upcalls having messed up our count 564 * of interrupt nesting level during the prior busy period. 565 */ 566 static void rcu_eqs_enter(bool user) 567 { 568 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 569 570 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); 571 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); 572 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 573 rdp->dynticks_nesting == 0); 574 if (rdp->dynticks_nesting != 1) { 575 rdp->dynticks_nesting--; 576 return; 577 } 578 579 lockdep_assert_irqs_disabled(); 580 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks); 581 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 582 rdp = this_cpu_ptr(&rcu_data); 583 do_nocb_deferred_wakeup(rdp); 584 rcu_prepare_for_idle(); 585 rcu_preempt_deferred_qs(current); 586 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. 
*/ 587 rcu_dynticks_eqs_enter(); 588 rcu_dynticks_task_enter(); 589 } 590 591 /** 592 * rcu_idle_enter - inform RCU that current CPU is entering idle 593 * 594 * Enter idle mode, in other words, -leave- the mode in which RCU 595 * read-side critical sections can occur. (Though RCU read-side 596 * critical sections can occur in irq handlers in idle, a possibility 597 * handled by irq_enter() and irq_exit().) 598 * 599 * If you add or remove a call to rcu_idle_enter(), be sure to test with 600 * CONFIG_RCU_EQS_DEBUG=y. 601 */ 602 void rcu_idle_enter(void) 603 { 604 lockdep_assert_irqs_disabled(); 605 rcu_eqs_enter(false); 606 } 607 608 #ifdef CONFIG_NO_HZ_FULL 609 /** 610 * rcu_user_enter - inform RCU that we are resuming userspace. 611 * 612 * Enter RCU idle mode right before resuming userspace. No use of RCU 613 * is permitted between this call and rcu_user_exit(). This way the 614 * CPU doesn't need to maintain the tick for RCU maintenance purposes 615 * when the CPU runs in userspace. 616 * 617 * If you add or remove a call to rcu_user_enter(), be sure to test with 618 * CONFIG_RCU_EQS_DEBUG=y. 619 */ 620 void rcu_user_enter(void) 621 { 622 lockdep_assert_irqs_disabled(); 623 rcu_eqs_enter(true); 624 } 625 #endif /* CONFIG_NO_HZ_FULL */ 626 627 /* 628 * If we are returning from the outermost NMI handler that interrupted an 629 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting 630 * to let the RCU grace-period handling know that the CPU is back to 631 * being RCU-idle. 632 * 633 * If you add or remove a call to rcu_nmi_exit_common(), be sure to test 634 * with CONFIG_RCU_EQS_DEBUG=y. 635 */ 636 static __always_inline void rcu_nmi_exit_common(bool irq) 637 { 638 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 639 640 /* 641 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. 642 * (We are exiting an NMI handler, so RCU better be paying attention 643 * to us!) 644 */ 645 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); 646 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()); 647 648 /* 649 * If the nesting level is not 1, the CPU wasn't RCU-idle, so 650 * leave it in non-RCU-idle state. 651 */ 652 if (rdp->dynticks_nmi_nesting != 1) { 653 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks); 654 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ 655 rdp->dynticks_nmi_nesting - 2); 656 return; 657 } 658 659 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ 660 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks); 661 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ 662 663 if (irq) 664 rcu_prepare_for_idle(); 665 666 rcu_dynticks_eqs_enter(); 667 668 if (irq) 669 rcu_dynticks_task_enter(); 670 } 671 672 /** 673 * rcu_nmi_exit - inform RCU of exit from NMI context 674 * 675 * If you add or remove a call to rcu_nmi_exit(), be sure to test 676 * with CONFIG_RCU_EQS_DEBUG=y. 677 */ 678 void rcu_nmi_exit(void) 679 { 680 rcu_nmi_exit_common(false); 681 } 682 683 /** 684 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle 685 * 686 * Exit from an interrupt handler, which might possibly result in entering 687 * idle mode, in other words, leaving the mode in which read-side critical 688 * sections can occur. The caller must have disabled interrupts. 689 * 690 * This code assumes that the idle loop never does anything that might 691 * result in unbalanced calls to irq_enter() and irq_exit(). 
If your 692 * architecture's idle loop violates this assumption, RCU will give you what 693 * you deserve, good and hard. But very infrequently and irreproducibly. 694 * 695 * Use things like work queues to work around this limitation. 696 * 697 * You have been warned. 698 * 699 * If you add or remove a call to rcu_irq_exit(), be sure to test with 700 * CONFIG_RCU_EQS_DEBUG=y. 701 */ 702 void rcu_irq_exit(void) 703 { 704 lockdep_assert_irqs_disabled(); 705 rcu_nmi_exit_common(true); 706 } 707 708 /* 709 * Wrapper for rcu_irq_exit() where interrupts are enabled. 710 * 711 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test 712 * with CONFIG_RCU_EQS_DEBUG=y. 713 */ 714 void rcu_irq_exit_irqson(void) 715 { 716 unsigned long flags; 717 718 local_irq_save(flags); 719 rcu_irq_exit(); 720 local_irq_restore(flags); 721 } 722 723 /* 724 * Exit an RCU extended quiescent state, which can be either the 725 * idle loop or adaptive-tickless usermode execution. 726 * 727 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to 728 * allow for the possibility of usermode upcalls messing up our count of 729 * interrupt nesting level during the busy period that is just now starting. 730 */ 731 static void rcu_eqs_exit(bool user) 732 { 733 struct rcu_data *rdp; 734 long oldval; 735 736 lockdep_assert_irqs_disabled(); 737 rdp = this_cpu_ptr(&rcu_data); 738 oldval = rdp->dynticks_nesting; 739 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); 740 if (oldval) { 741 rdp->dynticks_nesting++; 742 return; 743 } 744 rcu_dynticks_task_exit(); 745 rcu_dynticks_eqs_exit(); 746 rcu_cleanup_after_idle(); 747 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks); 748 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 749 WRITE_ONCE(rdp->dynticks_nesting, 1); 750 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); 751 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); 752 } 753 754 /** 755 * rcu_idle_exit - inform RCU that current CPU is leaving idle 756 * 757 * Exit idle mode, in other words, -enter- the mode in which RCU 758 * read-side critical sections can occur. 759 * 760 * If you add or remove a call to rcu_idle_exit(), be sure to test with 761 * CONFIG_RCU_EQS_DEBUG=y. 762 */ 763 void rcu_idle_exit(void) 764 { 765 unsigned long flags; 766 767 local_irq_save(flags); 768 rcu_eqs_exit(false); 769 local_irq_restore(flags); 770 } 771 772 #ifdef CONFIG_NO_HZ_FULL 773 /** 774 * rcu_user_exit - inform RCU that we are exiting userspace. 775 * 776 * Exit RCU idle mode while entering the kernel because it can 777 * run a RCU read side critical section anytime. 778 * 779 * If you add or remove a call to rcu_user_exit(), be sure to test with 780 * CONFIG_RCU_EQS_DEBUG=y. 781 */ 782 void rcu_user_exit(void) 783 { 784 rcu_eqs_exit(1); 785 } 786 #endif /* CONFIG_NO_HZ_FULL */ 787 788 /** 789 * rcu_nmi_enter_common - inform RCU of entry to NMI context 790 * @irq: Is this call from rcu_irq_enter? 791 * 792 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and 793 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know 794 * that the CPU is active. This implementation permits nested NMIs, as 795 * long as the nesting level does not overflow an int. (You will probably 796 * run out of stack space first.) 797 * 798 * If you add or remove a call to rcu_nmi_enter_common(), be sure to test 799 * with CONFIG_RCU_EQS_DEBUG=y. 
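 *
 * For example, the ->dynticks_nmi_nesting bookkeeping looks like this:
 *
 *	idle loop		->dynticks_nmi_nesting == 0
 *	NMI from idle		->dynticks_nmi_nesting == 1  (incby == 1)
 *	nested NMI		->dynticks_nmi_nesting == 3  (incby == 2)
 *
 * and the matching rcu_nmi_exit() calls step the counter back down to 1
 * and then to 0, at which point the CPU is once again RCU-idle.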
800 */ 801 static __always_inline void rcu_nmi_enter_common(bool irq) 802 { 803 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 804 long incby = 2; 805 806 /* Complain about underflow. */ 807 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); 808 809 /* 810 * If idle from RCU viewpoint, atomically increment ->dynticks 811 * to mark non-idle and increment ->dynticks_nmi_nesting by one. 812 * Otherwise, increment ->dynticks_nmi_nesting by two. This means 813 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed 814 * to be in the outermost NMI handler that interrupted an RCU-idle 815 * period (observation due to Andy Lutomirski). 816 */ 817 if (rcu_dynticks_curr_cpu_in_eqs()) { 818 819 if (irq) 820 rcu_dynticks_task_exit(); 821 822 rcu_dynticks_eqs_exit(); 823 824 if (irq) 825 rcu_cleanup_after_idle(); 826 827 incby = 1; 828 } else if (tick_nohz_full_cpu(rdp->cpu) && 829 rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE && 830 READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { 831 rdp->rcu_forced_tick = true; 832 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 833 } 834 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), 835 rdp->dynticks_nmi_nesting, 836 rdp->dynticks_nmi_nesting + incby, rdp->dynticks); 837 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ 838 rdp->dynticks_nmi_nesting + incby); 839 barrier(); 840 } 841 842 /** 843 * rcu_nmi_enter - inform RCU of entry to NMI context 844 */ 845 void rcu_nmi_enter(void) 846 { 847 rcu_nmi_enter_common(false); 848 } 849 NOKPROBE_SYMBOL(rcu_nmi_enter); 850 851 /** 852 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle 853 * 854 * Enter an interrupt handler, which might possibly result in exiting 855 * idle mode, in other words, entering the mode in which read-side critical 856 * sections can occur. The caller must have disabled interrupts. 857 * 858 * Note that the Linux kernel is fully capable of entering an interrupt 859 * handler that it never exits, for example when doing upcalls to user mode! 860 * This code assumes that the idle loop never does upcalls to user mode. 861 * If your architecture's idle loop does do upcalls to user mode (or does 862 * anything else that results in unbalanced calls to the irq_enter() and 863 * irq_exit() functions), RCU will give you what you deserve, good and hard. 864 * But very infrequently and irreproducibly. 865 * 866 * Use things like work queues to work around this limitation. 867 * 868 * You have been warned. 869 * 870 * If you add or remove a call to rcu_irq_enter(), be sure to test with 871 * CONFIG_RCU_EQS_DEBUG=y. 872 */ 873 void rcu_irq_enter(void) 874 { 875 lockdep_assert_irqs_disabled(); 876 rcu_nmi_enter_common(true); 877 } 878 879 /* 880 * Wrapper for rcu_irq_enter() where interrupts are enabled. 881 * 882 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test 883 * with CONFIG_RCU_EQS_DEBUG=y. 884 */ 885 void rcu_irq_enter_irqson(void) 886 { 887 unsigned long flags; 888 889 local_irq_save(flags); 890 rcu_irq_enter(); 891 local_irq_restore(flags); 892 } 893 894 /* 895 * If any sort of urgency was applied to the current CPU (for example, 896 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order 897 * to get to a quiescent state, disable it. 
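 *
 * In particular, this clears ->rcu_forced_tick and the TICK_DEP_BIT_RCU
 * tick dependency that rcu_nmi_enter_common() may have set via
 * tick_dep_set_cpu(), allowing the nohz_full CPU to once again stop its
 * scheduling-clock tick.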
898 */ 899 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) 900 { 901 WRITE_ONCE(rdp->rcu_urgent_qs, false); 902 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); 903 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { 904 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 905 rdp->rcu_forced_tick = false; 906 } 907 } 908 909 /** 910 * rcu_is_watching - see if RCU thinks that the current CPU is not idle 911 * 912 * Return true if RCU is watching the running CPU, which means that this 913 * CPU can safely enter RCU read-side critical sections. In other words, 914 * if the current CPU is not in its idle loop or is in an interrupt or 915 * NMI handler, return true. 916 */ 917 bool notrace rcu_is_watching(void) 918 { 919 bool ret; 920 921 preempt_disable_notrace(); 922 ret = !rcu_dynticks_curr_cpu_in_eqs(); 923 preempt_enable_notrace(); 924 return ret; 925 } 926 EXPORT_SYMBOL_GPL(rcu_is_watching); 927 928 /* 929 * If a holdout task is actually running, request an urgent quiescent 930 * state from its CPU. This is unsynchronized, so migrations can cause 931 * the request to go to the wrong CPU. Which is OK, all that will happen 932 * is that the CPU's next context switch will be a bit slower and next 933 * time around this task will generate another request. 934 */ 935 void rcu_request_urgent_qs_task(struct task_struct *t) 936 { 937 int cpu; 938 939 barrier(); 940 cpu = task_cpu(t); 941 if (!task_curr(t)) 942 return; /* This task is not running on that CPU. */ 943 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); 944 } 945 946 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 947 948 /* 949 * Is the current CPU online as far as RCU is concerned? 950 * 951 * Disable preemption to avoid false positives that could otherwise 952 * happen due to the current CPU number being sampled, this task being 953 * preempted, its old CPU being taken offline, resuming on some other CPU, 954 * then determining that its old CPU is now offline. 955 * 956 * Disable checking if in an NMI handler because we cannot safely 957 * report errors from NMI handlers anyway. In addition, it is OK to use 958 * RCU on an offline processor during initial boot, hence the check for 959 * rcu_scheduler_fully_active. 960 */ 961 bool rcu_lockdep_current_cpu_online(void) 962 { 963 struct rcu_data *rdp; 964 struct rcu_node *rnp; 965 bool ret = false; 966 967 if (in_nmi() || !rcu_scheduler_fully_active) 968 return true; 969 preempt_disable(); 970 rdp = this_cpu_ptr(&rcu_data); 971 rnp = rdp->mynode; 972 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) 973 ret = true; 974 preempt_enable(); 975 return ret; 976 } 977 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 978 979 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 980 981 /* 982 * We are reporting a quiescent state on behalf of some other CPU, so 983 * it is our responsibility to check for and handle potential overflow 984 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters. 985 * After all, the CPU might be in deep idle state, and thus executing no 986 * code whatsoever. 
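 *
 * For example, ->gpwrap is set once rnp->gp_seq has advanced more than
 * ULONG_MAX / 4 beyond this CPU's rdp->gp_seq, which later forces
 * __note_gp_changes() to resynchronize this CPU's grace-period state
 * instead of trusting the (possibly wrapped) cached values.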
987 */ 988 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) 989 { 990 raw_lockdep_assert_held_rcu_node(rnp); 991 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, 992 rnp->gp_seq)) 993 WRITE_ONCE(rdp->gpwrap, true); 994 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) 995 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; 996 } 997 998 /* 999 * Snapshot the specified CPU's dynticks counter so that we can later 1000 * credit it with an implicit quiescent state. Return 1 if this CPU 1001 * is in dynticks idle mode, which is an extended quiescent state. 1002 */ 1003 static int dyntick_save_progress_counter(struct rcu_data *rdp) 1004 { 1005 rdp->dynticks_snap = rcu_dynticks_snap(rdp); 1006 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { 1007 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 1008 rcu_gpnum_ovf(rdp->mynode, rdp); 1009 return 1; 1010 } 1011 return 0; 1012 } 1013 1014 /* 1015 * Return true if the specified CPU has passed through a quiescent 1016 * state by virtue of being in or having passed through a dynticks 1017 * idle state since the last call to dyntick_save_progress_counter() 1018 * for this same CPU, or by virtue of having been offline. 1019 */ 1020 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) 1021 { 1022 unsigned long jtsq; 1023 bool *rnhqp; 1024 bool *ruqp; 1025 struct rcu_node *rnp = rdp->mynode; 1026 1027 /* 1028 * If the CPU passed through or entered a dynticks idle phase with 1029 * no active irq/NMI handlers, then we can safely pretend that the CPU 1030 * already acknowledged the request to pass through a quiescent 1031 * state. Either way, that CPU cannot possibly be in an RCU 1032 * read-side critical section that started before the beginning 1033 * of the current RCU grace period. 1034 */ 1035 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { 1036 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); 1037 rcu_gpnum_ovf(rnp, rdp); 1038 return 1; 1039 } 1040 1041 /* If waiting too long on an offline CPU, complain. */ 1042 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && 1043 time_after(jiffies, rcu_state.gp_start + HZ)) { 1044 bool onl; 1045 struct rcu_node *rnp1; 1046 1047 WARN_ON(1); /* Offline CPUs are supposed to report QS! */ 1048 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", 1049 __func__, rnp->grplo, rnp->grphi, rnp->level, 1050 (long)rnp->gp_seq, (long)rnp->completedqs); 1051 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) 1052 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n", 1053 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask); 1054 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); 1055 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n", 1056 __func__, rdp->cpu, ".o"[onl], 1057 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, 1058 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); 1059 return 1; /* Break things loose after complaining. */ 1060 } 1061 1062 /* 1063 * A CPU running for an extended time within the kernel can 1064 * delay RCU grace periods: (1) At age jiffies_to_sched_qs, 1065 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set 1066 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the 1067 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs 1068 * variable are safe because the assignments are repeated if this 1069 * CPU failed to pass through a quiescent state.
This code 1070 * also checks .jiffies_resched in case jiffies_to_sched_qs 1071 * is set way high. 1072 */ 1073 jtsq = READ_ONCE(jiffies_to_sched_qs); 1074 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); 1075 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); 1076 if (!READ_ONCE(*rnhqp) && 1077 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || 1078 time_after(jiffies, rcu_state.jiffies_resched))) { 1079 WRITE_ONCE(*rnhqp, true); 1080 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ 1081 smp_store_release(ruqp, true); 1082 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { 1083 WRITE_ONCE(*ruqp, true); 1084 } 1085 1086 /* 1087 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! 1088 * The above code handles this, but only for straight cond_resched(). 1089 * And some in-kernel loops check need_resched() before calling 1090 * cond_resched(), which defeats the above code for CPUs that are 1091 * running in-kernel with scheduling-clock interrupts disabled. 1092 * So hit them over the head with the resched_cpu() hammer! 1093 */ 1094 if (tick_nohz_full_cpu(rdp->cpu) && 1095 time_after(jiffies, 1096 READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) { 1097 WRITE_ONCE(*ruqp, true); 1098 resched_cpu(rdp->cpu); 1099 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1100 } 1101 1102 /* 1103 * If more than halfway to RCU CPU stall-warning time, invoke 1104 * resched_cpu() more frequently to try to loosen things up a bit. 1105 * Also check to see if the CPU is getting hammered with interrupts, 1106 * but only once per grace period, just to keep the IPIs down to 1107 * a dull roar. 1108 */ 1109 if (time_after(jiffies, rcu_state.jiffies_resched)) { 1110 if (time_after(jiffies, 1111 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { 1112 resched_cpu(rdp->cpu); 1113 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1114 } 1115 if (IS_ENABLED(CONFIG_IRQ_WORK) && 1116 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && 1117 (rnp->ffmask & rdp->grpmask)) { 1118 init_irq_work(&rdp->rcu_iw, rcu_iw_handler); 1119 rdp->rcu_iw_pending = true; 1120 rdp->rcu_iw_gp_seq = rnp->gp_seq; 1121 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); 1122 } 1123 } 1124 1125 return 0; 1126 } 1127 1128 /* Trace-event wrapper function for trace_rcu_future_grace_period. */ 1129 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, 1130 unsigned long gp_seq_req, const char *s) 1131 { 1132 trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req, 1133 rnp->level, rnp->grplo, rnp->grphi, s); 1134 } 1135 1136 /* 1137 * rcu_start_this_gp - Request the start of a particular grace period 1138 * @rnp_start: The leaf node of the CPU from which to start. 1139 * @rdp: The rcu_data corresponding to the CPU from which to start. 1140 * @gp_seq_req: The gp_seq of the grace period to start. 1141 * 1142 * Start the specified grace period, as needed to handle newly arrived 1143 * callbacks. The required future grace periods are recorded in each 1144 * rcu_node structure's ->gp_seq_needed field. Returns true if there 1145 * is reason to awaken the grace-period kthread. 1146 * 1147 * The caller must hold the specified rcu_node structure's ->lock, which 1148 * is why the caller is responsible for waking the grace-period kthread. 1149 * 1150 * Returns true if the GP thread needs to be awakened else false. 
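 *
 * For example, on a two-level tree the request starts at the CPU's leaf
 * rcu_node structure with only rnp_start->lock held.  If that leaf does
 * not already record an equal-or-later request in ->gp_seq_needed, the
 * request is recorded there and the walk proceeds toward the root,
 * acquiring each rcu_node structure's ->lock in turn and bailing out
 * early if some node already covers the request or if the needed grace
 * period has already started.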
1151 */ 1152 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, 1153 unsigned long gp_seq_req) 1154 { 1155 bool ret = false; 1156 struct rcu_node *rnp; 1157 1158 /* 1159 * Use funnel locking to either acquire the root rcu_node 1160 * structure's lock or bail out if the need for this grace period 1161 * has already been recorded -- or if that grace period has in 1162 * fact already started. If there is already a grace period in 1163 * progress in a non-leaf node, no recording is needed because the 1164 * end of the grace period will scan the leaf rcu_node structures. 1165 * Note that rnp_start->lock must not be released. 1166 */ 1167 raw_lockdep_assert_held_rcu_node(rnp_start); 1168 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); 1169 for (rnp = rnp_start; 1; rnp = rnp->parent) { 1170 if (rnp != rnp_start) 1171 raw_spin_lock_rcu_node(rnp); 1172 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || 1173 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || 1174 (rnp != rnp_start && 1175 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { 1176 trace_rcu_this_gp(rnp, rdp, gp_seq_req, 1177 TPS("Prestarted")); 1178 goto unlock_out; 1179 } 1180 rnp->gp_seq_needed = gp_seq_req; 1181 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { 1182 /* 1183 * We just marked the leaf or internal node, and a 1184 * grace period is in progress, which means that 1185 * rcu_gp_cleanup() will see the marking. Bail to 1186 * reduce contention. 1187 */ 1188 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, 1189 TPS("Startedleaf")); 1190 goto unlock_out; 1191 } 1192 if (rnp != rnp_start && rnp->parent != NULL) 1193 raw_spin_unlock_rcu_node(rnp); 1194 if (!rnp->parent) 1195 break; /* At root, and perhaps also leaf. */ 1196 } 1197 1198 /* If GP already in progress, just leave, otherwise start one. */ 1199 if (rcu_gp_in_progress()) { 1200 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); 1201 goto unlock_out; 1202 } 1203 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); 1204 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT); 1205 rcu_state.gp_req_activity = jiffies; 1206 if (!rcu_state.gp_kthread) { 1207 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); 1208 goto unlock_out; 1209 } 1210 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq")); 1211 ret = true; /* Caller must wake GP kthread. */ 1212 unlock_out: 1213 /* Push furthest requested GP to leaf node and rcu_data structure. */ 1214 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { 1215 rnp_start->gp_seq_needed = rnp->gp_seq_needed; 1216 rdp->gp_seq_needed = rnp->gp_seq_needed; 1217 } 1218 if (rnp != rnp_start) 1219 raw_spin_unlock_rcu_node(rnp); 1220 return ret; 1221 } 1222 1223 /* 1224 * Clean up any old requests for the just-ended grace period. Also return 1225 * whether any additional grace periods have been requested. 1226 */ 1227 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) 1228 { 1229 bool needmore; 1230 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1231 1232 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); 1233 if (!needmore) 1234 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ 1235 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, 1236 needmore ? TPS("CleanupMore") : TPS("Cleanup")); 1237 return needmore; 1238 } 1239 1240 /* 1241 * Awaken the grace-period kthread. 
Don't do a self-awaken (unless in 1242 * an interrupt or softirq handler), and don't bother awakening when there 1243 * is nothing for the grace-period kthread to do (as in several CPUs raced 1244 * to awaken, and we lost), and finally don't try to awaken a kthread that 1245 * has not yet been created. If all those checks are passed, track some 1246 * debug information and awaken. 1247 * 1248 * So why do the self-wakeup when in an interrupt or softirq handler 1249 * in the grace-period kthread's context? Because the kthread might have 1250 * been interrupted just as it was going to sleep, and just after the final 1251 * pre-sleep check of the awaken condition. In this case, a wakeup really 1252 * is required, and is therefore supplied. 1253 */ 1254 static void rcu_gp_kthread_wake(void) 1255 { 1256 if ((current == rcu_state.gp_kthread && 1257 !in_irq() && !in_serving_softirq()) || 1258 !READ_ONCE(rcu_state.gp_flags) || 1259 !rcu_state.gp_kthread) 1260 return; 1261 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); 1262 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); 1263 swake_up_one(&rcu_state.gp_wq); 1264 } 1265 1266 /* 1267 * If there is room, assign a ->gp_seq number to any callbacks on this 1268 * CPU that have not already been assigned. Also accelerate any callbacks 1269 * that were previously assigned a ->gp_seq number that has since proven 1270 * to be too conservative, which can happen if callbacks get assigned a 1271 * ->gp_seq number while RCU is idle, but with reference to a non-root 1272 * rcu_node structure. This function is idempotent, so it does not hurt 1273 * to call it repeatedly. Returns a flag saying that we should awaken 1274 * the RCU grace-period kthread. 1275 * 1276 * The caller must hold rnp->lock with interrupts disabled. 1277 */ 1278 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1279 { 1280 unsigned long gp_seq_req; 1281 bool ret = false; 1282 1283 rcu_lockdep_assert_cblist_protected(rdp); 1284 raw_lockdep_assert_held_rcu_node(rnp); 1285 1286 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1287 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1288 return false; 1289 1290 /* 1291 * Callbacks are often registered with incomplete grace-period 1292 * information. Something about the fact that getting exact 1293 * information requires acquiring a global lock... RCU therefore 1294 * makes a conservative estimate of the grace period number at which 1295 * a given callback will become ready to invoke. The following 1296 * code checks this estimate and improves it when possible, thus 1297 * accelerating callback invocation to an earlier grace-period 1298 * number. 1299 */ 1300 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); 1301 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) 1302 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); 1303 1304 /* Trace depending on how much we were able to accelerate. */ 1305 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) 1306 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB")); 1307 else 1308 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB")); 1309 return ret; 1310 } 1311 1312 /* 1313 * Similar to rcu_accelerate_cbs(), but does not require that the leaf 1314 * rcu_node structure's ->lock be held. It consults the cached value 1315 * of ->gp_seq_needed in the rcu_data structure, and if that indicates 1316 * that a new grace-period request should be made, invokes rcu_accelerate_cbs() 1317 * while holding the leaf rcu_node structure's ->lock.
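 *
 * For example, if rdp->gp_seq_needed already covers
 * rcu_seq_snap(&rcu_state.gp_seq) and ->gpwrap is not set, the callbacks
 * are tagged via rcu_segcblist_accelerate() without acquiring rnp->lock
 * at all; otherwise the lock is taken, rcu_accelerate_cbs() is invoked,
 * and the grace-period kthread is awakened if needed.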
1318 */ 1319 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, 1320 struct rcu_data *rdp) 1321 { 1322 unsigned long c; 1323 bool needwake; 1324 1325 rcu_lockdep_assert_cblist_protected(rdp); 1326 c = rcu_seq_snap(&rcu_state.gp_seq); 1327 if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1328 /* Old request still live, so mark recent callbacks. */ 1329 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1330 return; 1331 } 1332 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1333 needwake = rcu_accelerate_cbs(rnp, rdp); 1334 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1335 if (needwake) 1336 rcu_gp_kthread_wake(); 1337 } 1338 1339 /* 1340 * Move any callbacks whose grace period has completed to the 1341 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1342 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1343 * sublist. This function is idempotent, so it does not hurt to 1344 * invoke it repeatedly. As long as it is not invoked -too- often... 1345 * Returns true if the RCU grace-period kthread needs to be awakened. 1346 * 1347 * The caller must hold rnp->lock with interrupts disabled. 1348 */ 1349 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1350 { 1351 rcu_lockdep_assert_cblist_protected(rdp); 1352 raw_lockdep_assert_held_rcu_node(rnp); 1353 1354 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1355 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1356 return false; 1357 1358 /* 1359 * Find all callbacks whose ->gp_seq numbers indicate that they 1360 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1361 */ 1362 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1363 1364 /* Classify any remaining callbacks. */ 1365 return rcu_accelerate_cbs(rnp, rdp); 1366 } 1367 1368 /* 1369 * Move and classify callbacks, but only if doing so won't require 1370 * that the RCU grace-period kthread be awakened. 1371 */ 1372 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1373 struct rcu_data *rdp) 1374 { 1375 rcu_lockdep_assert_cblist_protected(rdp); 1376 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || 1377 !raw_spin_trylock_rcu_node(rnp)) 1378 return; 1379 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1380 raw_spin_unlock_rcu_node(rnp); 1381 } 1382 1383 /* 1384 * Update CPU-local rcu_data state to record the beginnings and ends of 1385 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1386 * structure corresponding to the current CPU, and must have irqs disabled. 1387 * Returns true if the grace-period kthread needs to be awakened. 1388 */ 1389 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1390 { 1391 bool ret = false; 1392 bool need_gp; 1393 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 1394 rcu_segcblist_is_offloaded(&rdp->cblist); 1395 1396 raw_lockdep_assert_held_rcu_node(rnp); 1397 1398 if (rdp->gp_seq == rnp->gp_seq) 1399 return false; /* Nothing to do. */ 1400 1401 /* Handle the ends of any preceding grace periods first. */ 1402 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1403 unlikely(READ_ONCE(rdp->gpwrap))) { 1404 if (!offloaded) 1405 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ 1406 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1407 } else { 1408 if (!offloaded) 1409 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ 1410 } 1411 1412 /* Now handle the beginnings of any new-to-this-CPU grace periods. 
*/ 1413 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1414 unlikely(READ_ONCE(rdp->gpwrap))) { 1415 /* 1416 * If the current grace period is waiting for this CPU, 1417 * set up to detect a quiescent state, otherwise don't 1418 * go looking for one. 1419 */ 1420 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1421 need_gp = !!(rnp->qsmask & rdp->grpmask); 1422 rdp->cpu_no_qs.b.norm = need_gp; 1423 rdp->core_needs_qs = need_gp; 1424 zero_cpu_stall_ticks(rdp); 1425 } 1426 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1427 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1428 rdp->gp_seq_needed = rnp->gp_seq_needed; 1429 WRITE_ONCE(rdp->gpwrap, false); 1430 rcu_gpnum_ovf(rnp, rdp); 1431 return ret; 1432 } 1433 1434 static void note_gp_changes(struct rcu_data *rdp) 1435 { 1436 unsigned long flags; 1437 bool needwake; 1438 struct rcu_node *rnp; 1439 1440 local_irq_save(flags); 1441 rnp = rdp->mynode; 1442 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1443 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1444 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ 1445 local_irq_restore(flags); 1446 return; 1447 } 1448 needwake = __note_gp_changes(rnp, rdp); 1449 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1450 if (needwake) 1451 rcu_gp_kthread_wake(); 1452 } 1453 1454 static void rcu_gp_slow(int delay) 1455 { 1456 if (delay > 0 && 1457 !(rcu_seq_ctr(rcu_state.gp_seq) % 1458 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1459 schedule_timeout_uninterruptible(delay); 1460 } 1461 1462 /* 1463 * Initialize a new grace period. Return false if no grace period required. 1464 */ 1465 static bool rcu_gp_init(void) 1466 { 1467 unsigned long flags; 1468 unsigned long oldmask; 1469 unsigned long mask; 1470 struct rcu_data *rdp; 1471 struct rcu_node *rnp = rcu_get_root(); 1472 1473 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1474 raw_spin_lock_irq_rcu_node(rnp); 1475 if (!READ_ONCE(rcu_state.gp_flags)) { 1476 /* Spurious wakeup, tell caller to go back to sleep. */ 1477 raw_spin_unlock_irq_rcu_node(rnp); 1478 return false; 1479 } 1480 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ 1481 1482 if (WARN_ON_ONCE(rcu_gp_in_progress())) { 1483 /* 1484 * Grace period already in progress, don't start another. 1485 * Not supposed to be able to happen. 1486 */ 1487 raw_spin_unlock_irq_rcu_node(rnp); 1488 return false; 1489 } 1490 1491 /* Advance to a new grace period and initialize state. */ 1492 record_gp_stall_check_time(); 1493 /* Record GP times before starting GP, hence rcu_seq_start(). */ 1494 rcu_seq_start(&rcu_state.gp_seq); 1495 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); 1496 raw_spin_unlock_irq_rcu_node(rnp); 1497 1498 /* 1499 * Apply per-leaf buffered online and offline operations to the 1500 * rcu_node tree. Note that this new grace period need not wait 1501 * for subsequent online CPUs, and that quiescent-state forcing 1502 * will handle subsequent offline CPUs. 1503 */ 1504 rcu_state.gp_state = RCU_GP_ONOFF; 1505 rcu_for_each_leaf_node(rnp) { 1506 raw_spin_lock(&rcu_state.ofl_lock); 1507 raw_spin_lock_irq_rcu_node(rnp); 1508 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1509 !rnp->wait_blkd_tasks) { 1510 /* Nothing to do on this leaf rcu_node structure. */ 1511 raw_spin_unlock_irq_rcu_node(rnp); 1512 raw_spin_unlock(&rcu_state.ofl_lock); 1513 continue; 1514 } 1515 1516 /* Record old state, apply changes to ->qsmaskinit field. 
*/ 1517 oldmask = rnp->qsmaskinit; 1518 rnp->qsmaskinit = rnp->qsmaskinitnext; 1519 1520 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1521 if (!oldmask != !rnp->qsmaskinit) { 1522 if (!oldmask) { /* First online CPU for rcu_node. */ 1523 if (!rnp->wait_blkd_tasks) /* Ever offline? */ 1524 rcu_init_new_rnp(rnp); 1525 } else if (rcu_preempt_has_tasks(rnp)) { 1526 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1527 } else { /* Last offline CPU and can propagate. */ 1528 rcu_cleanup_dead_rnp(rnp); 1529 } 1530 } 1531 1532 /* 1533 * If all waited-on tasks from prior grace period are 1534 * done, and if all this rcu_node structure's CPUs are 1535 * still offline, propagate up the rcu_node tree and 1536 * clear ->wait_blkd_tasks. Otherwise, if one of this 1537 * rcu_node structure's CPUs has since come back online, 1538 * simply clear ->wait_blkd_tasks. 1539 */ 1540 if (rnp->wait_blkd_tasks && 1541 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1542 rnp->wait_blkd_tasks = false; 1543 if (!rnp->qsmaskinit) 1544 rcu_cleanup_dead_rnp(rnp); 1545 } 1546 1547 raw_spin_unlock_irq_rcu_node(rnp); 1548 raw_spin_unlock(&rcu_state.ofl_lock); 1549 } 1550 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1551 1552 /* 1553 * Set the quiescent-state-needed bits in all the rcu_node 1554 * structures for all currently online CPUs in breadth-first 1555 * order, starting from the root rcu_node structure, relying on the 1556 * layout of the tree within the rcu_state.node[] array. Note that 1557 * other CPUs will access only the leaves of the hierarchy, thus 1558 * seeing that no grace period is in progress, at least until the 1559 * corresponding leaf node has been initialized. 1560 * 1561 * The grace period cannot complete until the initialization 1562 * process finishes, because this kthread handles both. 1563 */ 1564 rcu_state.gp_state = RCU_GP_INIT; 1565 rcu_for_each_node_breadth_first(rnp) { 1566 rcu_gp_slow(gp_init_delay); 1567 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1568 rdp = this_cpu_ptr(&rcu_data); 1569 rcu_preempt_check_blocked_tasks(rnp); 1570 rnp->qsmask = rnp->qsmaskinit; 1571 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1572 if (rnp == rdp->mynode) 1573 (void)__note_gp_changes(rnp, rdp); 1574 rcu_preempt_boost_start_gp(rnp); 1575 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1576 rnp->level, rnp->grplo, 1577 rnp->grphi, rnp->qsmask); 1578 /* Quiescent states for tasks on any now-offline CPUs. */ 1579 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1580 rnp->rcu_gp_init_mask = mask; 1581 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1582 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1583 else 1584 raw_spin_unlock_irq_rcu_node(rnp); 1585 cond_resched_tasks_rcu_qs(); 1586 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1587 } 1588 1589 return true; 1590 } 1591 1592 /* 1593 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1594 * time. 1595 */ 1596 static bool rcu_gp_fqs_check_wake(int *gfp) 1597 { 1598 struct rcu_node *rnp = rcu_get_root(); 1599 1600 /* Someone like call_rcu() requested a force-quiescent-state scan. */ 1601 *gfp = READ_ONCE(rcu_state.gp_flags); 1602 if (*gfp & RCU_GP_FLAG_FQS) 1603 return true; 1604 1605 /* The current grace period has completed. */ 1606 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1607 return true; 1608 1609 return false; 1610 } 1611 1612 /* 1613 * Do one round of quiescent-state forcing. 
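 *
 * The first invocation in a given grace period collects a dyntick-idle
 * snapshot from each holdout CPU via dyntick_save_progress_counter();
 * later invocations instead use rcu_implicit_dynticks_qs() to check
 * whether each holdout CPU has passed through an extended quiescent
 * state (or gone offline) since its snapshot was taken.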
1614 */ 1615 static void rcu_gp_fqs(bool first_time) 1616 { 1617 struct rcu_node *rnp = rcu_get_root(); 1618 1619 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1620 rcu_state.n_force_qs++; 1621 if (first_time) { 1622 /* Collect dyntick-idle snapshots. */ 1623 force_qs_rnp(dyntick_save_progress_counter); 1624 } else { 1625 /* Handle dyntick-idle and offline CPUs. */ 1626 force_qs_rnp(rcu_implicit_dynticks_qs); 1627 } 1628 /* Clear flag to prevent immediate re-entry. */ 1629 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 1630 raw_spin_lock_irq_rcu_node(rnp); 1631 WRITE_ONCE(rcu_state.gp_flags, 1632 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); 1633 raw_spin_unlock_irq_rcu_node(rnp); 1634 } 1635 } 1636 1637 /* 1638 * Loop doing repeated quiescent-state forcing until the grace period ends. 1639 */ 1640 static void rcu_gp_fqs_loop(void) 1641 { 1642 bool first_gp_fqs; 1643 int gf; 1644 unsigned long j; 1645 int ret; 1646 struct rcu_node *rnp = rcu_get_root(); 1647 1648 first_gp_fqs = true; 1649 j = READ_ONCE(jiffies_till_first_fqs); 1650 ret = 0; 1651 for (;;) { 1652 if (!ret) { 1653 rcu_state.jiffies_force_qs = jiffies + j; 1654 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1655 jiffies + (j ? 3 * j : 2)); 1656 } 1657 trace_rcu_grace_period(rcu_state.name, 1658 READ_ONCE(rcu_state.gp_seq), 1659 TPS("fqswait")); 1660 rcu_state.gp_state = RCU_GP_WAIT_FQS; 1661 ret = swait_event_idle_timeout_exclusive( 1662 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); 1663 rcu_state.gp_state = RCU_GP_DOING_FQS; 1664 /* Locking provides needed memory barriers. */ 1665 /* If grace period done, leave loop. */ 1666 if (!READ_ONCE(rnp->qsmask) && 1667 !rcu_preempt_blocked_readers_cgp(rnp)) 1668 break; 1669 /* If time for quiescent-state forcing, do it. */ 1670 if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) || 1671 (gf & RCU_GP_FLAG_FQS)) { 1672 trace_rcu_grace_period(rcu_state.name, 1673 READ_ONCE(rcu_state.gp_seq), 1674 TPS("fqsstart")); 1675 rcu_gp_fqs(first_gp_fqs); 1676 first_gp_fqs = false; 1677 trace_rcu_grace_period(rcu_state.name, 1678 READ_ONCE(rcu_state.gp_seq), 1679 TPS("fqsend")); 1680 cond_resched_tasks_rcu_qs(); 1681 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1682 ret = 0; /* Force full wait till next FQS. */ 1683 j = READ_ONCE(jiffies_till_next_fqs); 1684 } else { 1685 /* Deal with stray signal. */ 1686 cond_resched_tasks_rcu_qs(); 1687 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1688 WARN_ON(signal_pending(current)); 1689 trace_rcu_grace_period(rcu_state.name, 1690 READ_ONCE(rcu_state.gp_seq), 1691 TPS("fqswaitsig")); 1692 ret = 1; /* Keep old FQS timing. */ 1693 j = jiffies; 1694 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 1695 j = 1; 1696 else 1697 j = rcu_state.jiffies_force_qs - j; 1698 } 1699 } 1700 } 1701 1702 /* 1703 * Clean up after the old grace period. 1704 */ 1705 static void rcu_gp_cleanup(void) 1706 { 1707 unsigned long gp_duration; 1708 bool needgp = false; 1709 unsigned long new_gp_seq; 1710 bool offloaded; 1711 struct rcu_data *rdp; 1712 struct rcu_node *rnp = rcu_get_root(); 1713 struct swait_queue_head *sq; 1714 1715 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1716 raw_spin_lock_irq_rcu_node(rnp); 1717 rcu_state.gp_end = jiffies; 1718 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 1719 if (gp_duration > rcu_state.gp_max) 1720 rcu_state.gp_max = gp_duration; 1721 1722 /* 1723 * We know the grace period is complete, but to everyone else 1724 * it appears to still be ongoing. 
But it is also the case 1725 * that to everyone else it looks like there is nothing that 1726 * they can do to advance the grace period. It is therefore 1727 * safe for us to drop the lock in order to mark the grace 1728 * period as completed in all of the rcu_node structures. 1729 */ 1730 raw_spin_unlock_irq_rcu_node(rnp); 1731 1732 /* 1733 * Propagate new ->gp_seq value to rcu_node structures so that 1734 * other CPUs don't have to wait until the start of the next grace 1735 * period to process their callbacks. This also avoids some nasty 1736 * RCU grace-period initialization races by forcing the end of 1737 * the current grace period to be completely recorded in all of 1738 * the rcu_node structures before the beginning of the next grace 1739 * period is recorded in any of the rcu_node structures. 1740 */ 1741 new_gp_seq = rcu_state.gp_seq; 1742 rcu_seq_end(&new_gp_seq); 1743 rcu_for_each_node_breadth_first(rnp) { 1744 raw_spin_lock_irq_rcu_node(rnp); 1745 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 1746 dump_blkd_tasks(rnp, 10); 1747 WARN_ON_ONCE(rnp->qsmask); 1748 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 1749 rdp = this_cpu_ptr(&rcu_data); 1750 if (rnp == rdp->mynode) 1751 needgp = __note_gp_changes(rnp, rdp) || needgp; 1752 /* smp_mb() provided by prior unlock-lock pair. */ 1753 needgp = rcu_future_gp_cleanup(rnp) || needgp; 1754 sq = rcu_nocb_gp_get(rnp); 1755 raw_spin_unlock_irq_rcu_node(rnp); 1756 rcu_nocb_gp_cleanup(sq); 1757 cond_resched_tasks_rcu_qs(); 1758 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1759 rcu_gp_slow(gp_cleanup_delay); 1760 } 1761 rnp = rcu_get_root(); 1762 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 1763 1764 /* Declare grace period done, trace first to use old GP number. */ 1765 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 1766 rcu_seq_end(&rcu_state.gp_seq); 1767 rcu_state.gp_state = RCU_GP_IDLE; 1768 /* Check for GP requests since above loop. */ 1769 rdp = this_cpu_ptr(&rcu_data); 1770 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 1771 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 1772 TPS("CleanupMore")); 1773 needgp = true; 1774 } 1775 /* Advance CBs to reduce false positives below. */ 1776 offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 1777 rcu_segcblist_is_offloaded(&rdp->cblist); 1778 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { 1779 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 1780 rcu_state.gp_req_activity = jiffies; 1781 trace_rcu_grace_period(rcu_state.name, 1782 READ_ONCE(rcu_state.gp_seq), 1783 TPS("newreq")); 1784 } else { 1785 WRITE_ONCE(rcu_state.gp_flags, 1786 rcu_state.gp_flags & RCU_GP_FLAG_INIT); 1787 } 1788 raw_spin_unlock_irq_rcu_node(rnp); 1789 } 1790 1791 /* 1792 * Body of kthread that handles grace periods. 1793 */ 1794 static int __noreturn rcu_gp_kthread(void *unused) 1795 { 1796 rcu_bind_gp_kthread(); 1797 for (;;) { 1798 1799 /* Handle grace-period start. */ 1800 for (;;) { 1801 trace_rcu_grace_period(rcu_state.name, 1802 READ_ONCE(rcu_state.gp_seq), 1803 TPS("reqwait")); 1804 rcu_state.gp_state = RCU_GP_WAIT_GPS; 1805 swait_event_idle_exclusive(rcu_state.gp_wq, 1806 READ_ONCE(rcu_state.gp_flags) & 1807 RCU_GP_FLAG_INIT); 1808 rcu_state.gp_state = RCU_GP_DONE_GPS; 1809 /* Locking provides needed memory barrier. 
*/ 1810 if (rcu_gp_init()) 1811 break; 1812 cond_resched_tasks_rcu_qs(); 1813 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1814 WARN_ON(signal_pending(current)); 1815 trace_rcu_grace_period(rcu_state.name, 1816 READ_ONCE(rcu_state.gp_seq), 1817 TPS("reqwaitsig")); 1818 } 1819 1820 /* Handle quiescent-state forcing. */ 1821 rcu_gp_fqs_loop(); 1822 1823 /* Handle grace-period end. */ 1824 rcu_state.gp_state = RCU_GP_CLEANUP; 1825 rcu_gp_cleanup(); 1826 rcu_state.gp_state = RCU_GP_CLEANED; 1827 } 1828 } 1829 1830 /* 1831 * Report a full set of quiescent states to the rcu_state data structure. 1832 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 1833 * another grace period is required. Whether we wake the grace-period 1834 * kthread or it awakens itself for the next round of quiescent-state 1835 * forcing, that kthread will clean up after the just-completed grace 1836 * period. Note that the caller must hold rnp->lock, which is released 1837 * before return. 1838 */ 1839 static void rcu_report_qs_rsp(unsigned long flags) 1840 __releases(rcu_get_root()->lock) 1841 { 1842 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 1843 WARN_ON_ONCE(!rcu_gp_in_progress()); 1844 WRITE_ONCE(rcu_state.gp_flags, 1845 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 1846 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 1847 rcu_gp_kthread_wake(); 1848 } 1849 1850 /* 1851 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 1852 * Allows quiescent states for a group of CPUs to be reported at one go 1853 * to the specified rcu_node structure, though all the CPUs in the group 1854 * must be represented by the same rcu_node structure (which need not be a 1855 * leaf rcu_node structure, though it often will be). The gps parameter 1856 * is the grace-period snapshot, which means that the quiescent states 1857 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 1858 * must be held upon entry, and it is released before return. 1859 * 1860 * As a special case, if mask is zero, the bit-already-cleared check is 1861 * disabled. This allows propagating quiescent state due to resumed tasks 1862 * during grace-period initialization. 1863 */ 1864 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 1865 unsigned long gps, unsigned long flags) 1866 __releases(rnp->lock) 1867 { 1868 unsigned long oldmask = 0; 1869 struct rcu_node *rnp_c; 1870 1871 raw_lockdep_assert_held_rcu_node(rnp); 1872 1873 /* Walk up the rcu_node hierarchy. */ 1874 for (;;) { 1875 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 1876 1877 /* 1878 * Our bit has already been cleared, or the 1879 * relevant grace period is already over, so done. 1880 */ 1881 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1882 return; 1883 } 1884 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 1885 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 1886 rcu_preempt_blocked_readers_cgp(rnp)); 1887 rnp->qsmask &= ~mask; 1888 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 1889 mask, rnp->qsmask, rnp->level, 1890 rnp->grplo, rnp->grphi, 1891 !!rnp->gp_tasks); 1892 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 1893 1894 /* Other bits still set at this level, so done. */ 1895 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1896 return; 1897 } 1898 rnp->completedqs = rnp->gp_seq; 1899 mask = rnp->grpmask; 1900 if (rnp->parent == NULL) { 1901 1902 /* No more levels. Exit loop holding root lock. 
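 * The rcu_report_qs_rsp() call below expects the root rcu_node
 * structure's ->lock to be held and releases it before returning.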
*/ 1903 1904 break; 1905 } 1906 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1907 rnp_c = rnp; 1908 rnp = rnp->parent; 1909 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1910 oldmask = rnp_c->qsmask; 1911 } 1912 1913 /* 1914 * Get here if we are the last CPU to pass through a quiescent 1915 * state for this grace period. Invoke rcu_report_qs_rsp() 1916 * to clean up and start the next grace period if one is needed. 1917 */ 1918 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 1919 } 1920 1921 /* 1922 * Record a quiescent state for all tasks that were previously queued 1923 * on the specified rcu_node structure and that were blocking the current 1924 * RCU grace period. The caller must hold the corresponding rnp->lock with 1925 * irqs disabled, and this lock is released upon return, but irqs remain 1926 * disabled. 1927 */ 1928 static void __maybe_unused 1929 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 1930 __releases(rnp->lock) 1931 { 1932 unsigned long gps; 1933 unsigned long mask; 1934 struct rcu_node *rnp_p; 1935 1936 raw_lockdep_assert_held_rcu_node(rnp); 1937 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPTION)) || 1938 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 1939 rnp->qsmask != 0) { 1940 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1941 return; /* Still need more quiescent states! */ 1942 } 1943 1944 rnp->completedqs = rnp->gp_seq; 1945 rnp_p = rnp->parent; 1946 if (rnp_p == NULL) { 1947 /* 1948 * Only one rcu_node structure in the tree, so don't 1949 * try to report up to its nonexistent parent! 1950 */ 1951 rcu_report_qs_rsp(flags); 1952 return; 1953 } 1954 1955 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 1956 gps = rnp->gp_seq; 1957 mask = rnp->grpmask; 1958 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1959 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 1960 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 1961 } 1962 1963 /* 1964 * Record a quiescent state for the specified CPU to that CPU's rcu_data 1965 * structure. This must be called from the specified CPU. 1966 */ 1967 static void 1968 rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) 1969 { 1970 unsigned long flags; 1971 unsigned long mask; 1972 bool needwake = false; 1973 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 1974 rcu_segcblist_is_offloaded(&rdp->cblist); 1975 struct rcu_node *rnp; 1976 1977 rnp = rdp->mynode; 1978 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1979 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 1980 rdp->gpwrap) { 1981 1982 /* 1983 * The grace period in which this quiescent state was 1984 * recorded has ended, so don't report it upwards. 1985 * We will instead need a new quiescent state that lies 1986 * within the current grace period. 1987 */ 1988 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 1989 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1990 return; 1991 } 1992 mask = rdp->grpmask; 1993 if ((rnp->qsmask & mask) == 0) { 1994 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1995 } else { 1996 /* 1997 * This GP can't end until cpu checks in, so all of our 1998 * callbacks can be processed during the next GP. 
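 * Hence the rcu_accelerate_cbs() call below, which assigns a
 * grace-period number to any callbacks not yet associated with one.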
1999 */ 2000 if (!offloaded) 2001 needwake = rcu_accelerate_cbs(rnp, rdp); 2002 2003 rcu_disable_urgency_upon_qs(rdp); 2004 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2005 /* ^^^ Released rnp->lock */ 2006 if (needwake) 2007 rcu_gp_kthread_wake(); 2008 } 2009 } 2010 2011 /* 2012 * Check to see if there is a new grace period of which this CPU 2013 * is not yet aware, and if so, set up local rcu_data state for it. 2014 * Otherwise, see if this CPU has just passed through its first 2015 * quiescent state for this grace period, and record that fact if so. 2016 */ 2017 static void 2018 rcu_check_quiescent_state(struct rcu_data *rdp) 2019 { 2020 /* Check for grace-period ends and beginnings. */ 2021 note_gp_changes(rdp); 2022 2023 /* 2024 * Does this CPU still need to do its part for current grace period? 2025 * If no, return and let the other CPUs do their part as well. 2026 */ 2027 if (!rdp->core_needs_qs) 2028 return; 2029 2030 /* 2031 * Was there a quiescent state since the beginning of the grace 2032 * period? If no, then exit and wait for the next call. 2033 */ 2034 if (rdp->cpu_no_qs.b.norm) 2035 return; 2036 2037 /* 2038 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2039 * judge of that). 2040 */ 2041 rcu_report_qs_rdp(rdp->cpu, rdp); 2042 } 2043 2044 /* 2045 * Near the end of the offline process. Trace the fact that this CPU 2046 * is going offline. 2047 */ 2048 int rcutree_dying_cpu(unsigned int cpu) 2049 { 2050 bool blkd; 2051 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 2052 struct rcu_node *rnp = rdp->mynode; 2053 2054 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2055 return 0; 2056 2057 blkd = !!(rnp->qsmask & rdp->grpmask); 2058 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, 2059 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 2060 return 0; 2061 } 2062 2063 /* 2064 * All CPUs for the specified rcu_node structure have gone offline, 2065 * and all tasks that were preempted within an RCU read-side critical 2066 * section while running on one of those CPUs have since exited their RCU 2067 * read-side critical section. Some other CPU is reporting this fact with 2068 * the specified rcu_node structure's ->lock held and interrupts disabled. 2069 * This function therefore goes up the tree of rcu_node structures, 2070 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2071 * the leaf rcu_node structure's ->qsmaskinit field has already been 2072 * updated. 2073 * 2074 * This function does check that the specified rcu_node structure has 2075 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2076 * prematurely. That said, invoking it after the fact will cost you 2077 * a needless lock acquisition. So once it has done its work, don't 2078 * invoke it again. 2079 */ 2080 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2081 { 2082 long mask; 2083 struct rcu_node *rnp = rnp_leaf; 2084 2085 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2086 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2087 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 2088 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 2089 return; 2090 for (;;) { 2091 mask = rnp->grpmask; 2092 rnp = rnp->parent; 2093 if (!rnp) 2094 break; 2095 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2096 rnp->qsmaskinit &= ~mask; 2097 /* Between grace periods, so better already be zero! */ 2098 WARN_ON_ONCE(rnp->qsmask); 2099 if (rnp->qsmaskinit) { 2100 raw_spin_unlock_rcu_node(rnp); 2101 /* irqs remain disabled. */ 2102 return; 2103 } 2104 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. 
*/ 2105 } 2106 } 2107 2108 /* 2109 * The CPU has been completely removed, and some other CPU is reporting 2110 * this fact from process context. Do the remainder of the cleanup. 2111 * There can only be one CPU hotplug operation at a time, so no need for 2112 * explicit locking. 2113 */ 2114 int rcutree_dead_cpu(unsigned int cpu) 2115 { 2116 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2117 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2118 2119 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2120 return 0; 2121 2122 /* Adjust any no-longer-needed kthreads. */ 2123 rcu_boost_kthread_setaffinity(rnp, -1); 2124 /* Do any needed no-CB deferred wakeups from this CPU. */ 2125 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); 2126 2127 // Stop-machine done, so allow nohz_full to disable tick. 2128 tick_dep_clear(TICK_DEP_BIT_RCU); 2129 return 0; 2130 } 2131 2132 /* 2133 * Invoke any RCU callbacks that have made it to the end of their grace 2134 * period. Throttle as specified by rdp->blimit. 2135 */ 2136 static void rcu_do_batch(struct rcu_data *rdp) 2137 { 2138 unsigned long flags; 2139 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2140 rcu_segcblist_is_offloaded(&rdp->cblist); 2141 struct rcu_head *rhp; 2142 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2143 long bl, count; 2144 long pending, tlimit = 0; 2145 2146 /* If no callbacks are ready, just return. */ 2147 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2148 trace_rcu_batch_start(rcu_state.name, 2149 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2150 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2151 trace_rcu_batch_end(rcu_state.name, 0, 2152 !rcu_segcblist_empty(&rdp->cblist), 2153 need_resched(), is_idle_task(current), 2154 rcu_is_callbacks_kthread()); 2155 return; 2156 } 2157 2158 /* 2159 * Extract the list of ready callbacks, disabling interrupts to prevent 2160 * races with call_rcu() from interrupt handlers. Leave the 2161 * callback counts, as rcu_barrier() needs to be conservative. 2162 */ 2163 local_irq_save(flags); 2164 rcu_nocb_lock(rdp); 2165 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2166 pending = rcu_segcblist_n_cbs(&rdp->cblist); 2167 bl = max(rdp->blimit, pending >> rcu_divisor); 2168 if (unlikely(bl > 100)) 2169 tlimit = local_clock() + rcu_resched_ns; 2170 trace_rcu_batch_start(rcu_state.name, 2171 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2172 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2173 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2174 if (offloaded) 2175 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2176 rcu_nocb_unlock_irqrestore(rdp, flags); 2177 2178 /* Invoke callbacks. */ 2179 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2180 rhp = rcu_cblist_dequeue(&rcl); 2181 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2182 debug_rcu_head_unqueue(rhp); 2183 if (__rcu_reclaim(rcu_state.name, rhp)) 2184 rcu_cblist_dequeued_lazy(&rcl); 2185 /* 2186 * Stop only if limit reached and CPU has something to do. 2187 * Note: The rcl structure counts down from zero. 2188 */ 2189 if (-rcl.len >= bl && !offloaded && 2190 (need_resched() || 2191 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2192 break; 2193 if (unlikely(tlimit)) { 2194 /* only call local_clock() every 32 callbacks */ 2195 if (likely((-rcl.len & 31) || local_clock() < tlimit)) 2196 continue; 2197 /* Exceeded the time limit, so leave.
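 * tlimit is non-zero only when the batch limit bl computed above
 * exceeded 100, in which case this pass is bounded to roughly
 * rcu_resched_ns of local_clock() time.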
*/ 2198 break; 2199 } 2200 if (offloaded) { 2201 WARN_ON_ONCE(in_serving_softirq()); 2202 local_bh_enable(); 2203 lockdep_assert_irqs_enabled(); 2204 cond_resched_tasks_rcu_qs(); 2205 lockdep_assert_irqs_enabled(); 2206 local_bh_disable(); 2207 } 2208 } 2209 2210 local_irq_save(flags); 2211 rcu_nocb_lock(rdp); 2212 count = -rcl.len; 2213 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2214 is_idle_task(current), rcu_is_callbacks_kthread()); 2215 2216 /* Update counts and requeue any remaining callbacks. */ 2217 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2218 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2219 rcu_segcblist_insert_count(&rdp->cblist, &rcl); 2220 2221 /* Reinstate batch limit if we have worked down the excess. */ 2222 count = rcu_segcblist_n_cbs(&rdp->cblist); 2223 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2224 rdp->blimit = blimit; 2225 2226 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2227 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2228 rdp->qlen_last_fqs_check = 0; 2229 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2230 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2231 rdp->qlen_last_fqs_check = count; 2232 2233 /* 2234 * The following usually indicates a double call_rcu(). To track 2235 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2236 */ 2237 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist)); 2238 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2239 count != 0 && rcu_segcblist_empty(&rdp->cblist)); 2240 2241 rcu_nocb_unlock_irqrestore(rdp, flags); 2242 2243 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2244 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) 2245 invoke_rcu_core(); 2246 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2247 } 2248 2249 /* 2250 * This function is invoked from each scheduling-clock interrupt, 2251 * and checks to see if this CPU is in a non-context-switch quiescent 2252 * state, for example, user mode or idle loop. It also schedules RCU 2253 * core processing. If the current grace period has gone on too long, 2254 * it will ask the scheduler to manufacture a context switch for the sole 2255 * purpose of providing the needed quiescent state. 2256 */ 2257 void rcu_sched_clock_irq(int user) 2258 { 2259 trace_rcu_utilization(TPS("Start scheduler-tick")); 2260 raw_cpu_inc(rcu_data.ticks_this_gp); 2261 /* The load-acquire pairs with the store-release setting to true. */ 2262 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2263 /* Idle and userspace execution already are quiescent states. */ 2264 if (!rcu_is_cpu_rrupt_from_idle() && !user) { 2265 set_tsk_need_resched(current); 2266 set_preempt_need_resched(); 2267 } 2268 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2269 } 2270 rcu_flavor_sched_clock_irq(user); 2271 if (rcu_pending(user)) 2272 invoke_rcu_core(); 2273 2274 trace_rcu_utilization(TPS("End scheduler-tick")); 2275 } 2276 2277 /* 2278 * Scan the leaf rcu_node structures. For each structure on which all 2279 * CPUs have reported a quiescent state and on which there are tasks 2280 * blocking the current grace period, initiate RCU priority boosting. 2281 * Otherwise, invoke the specified function to check dyntick state for 2282 * each CPU that has not yet reported a quiescent state.
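 * The two callers in rcu_gp_fqs() pass dyntick_save_progress_counter()
 * on the first invocation for a grace period and
 * rcu_implicit_dynticks_qs() on subsequent invocations.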
2283 */ 2284 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2285 { 2286 int cpu; 2287 unsigned long flags; 2288 unsigned long mask; 2289 struct rcu_data *rdp; 2290 struct rcu_node *rnp; 2291 2292 rcu_for_each_leaf_node(rnp) { 2293 cond_resched_tasks_rcu_qs(); 2294 mask = 0; 2295 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2296 if (rnp->qsmask == 0) { 2297 if (!IS_ENABLED(CONFIG_PREEMPTION) || 2298 rcu_preempt_blocked_readers_cgp(rnp)) { 2299 /* 2300 * No point in scanning bits because they 2301 * are all zero. But we might need to 2302 * priority-boost blocked readers. 2303 */ 2304 rcu_initiate_boost(rnp, flags); 2305 /* rcu_initiate_boost() releases rnp->lock */ 2306 continue; 2307 } 2308 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2309 continue; 2310 } 2311 for_each_leaf_node_possible_cpu(rnp, cpu) { 2312 unsigned long bit = leaf_node_cpu_bit(rnp, cpu); 2313 if ((rnp->qsmask & bit) != 0) { 2314 rdp = per_cpu_ptr(&rcu_data, cpu); 2315 if (f(rdp)) { 2316 mask |= bit; 2317 rcu_disable_urgency_upon_qs(rdp); 2318 } 2319 } 2320 } 2321 if (mask != 0) { 2322 /* Idle/offline CPUs, report (releases rnp->lock). */ 2323 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2324 } else { 2325 /* Nothing to do here, so just drop the lock. */ 2326 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2327 } 2328 } 2329 } 2330 2331 /* 2332 * Force quiescent states on reluctant CPUs, and also detect which 2333 * CPUs are in dyntick-idle mode. 2334 */ 2335 void rcu_force_quiescent_state(void) 2336 { 2337 unsigned long flags; 2338 bool ret; 2339 struct rcu_node *rnp; 2340 struct rcu_node *rnp_old = NULL; 2341 2342 /* Funnel through hierarchy to reduce memory contention. */ 2343 rnp = __this_cpu_read(rcu_data.mynode); 2344 for (; rnp != NULL; rnp = rnp->parent) { 2345 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2346 !raw_spin_trylock(&rnp->fqslock); 2347 if (rnp_old != NULL) 2348 raw_spin_unlock(&rnp_old->fqslock); 2349 if (ret) 2350 return; 2351 rnp_old = rnp; 2352 } 2353 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2354 2355 /* Reached the root of the rcu_node tree, acquire lock. */ 2356 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2357 raw_spin_unlock(&rnp_old->fqslock); 2358 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2359 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2360 return; /* Someone beat us to it. */ 2361 } 2362 WRITE_ONCE(rcu_state.gp_flags, 2363 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2364 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2365 rcu_gp_kthread_wake(); 2366 } 2367 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2368 2369 /* Perform RCU core processing work for the current CPU. */ 2370 static __latent_entropy void rcu_core(void) 2371 { 2372 unsigned long flags; 2373 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2374 struct rcu_node *rnp = rdp->mynode; 2375 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2376 rcu_segcblist_is_offloaded(&rdp->cblist); 2377 2378 if (cpu_is_offline(smp_processor_id())) 2379 return; 2380 trace_rcu_utilization(TPS("Start RCU core")); 2381 WARN_ON_ONCE(!rdp->beenonline); 2382 2383 /* Report any deferred quiescent states if preemption enabled. */ 2384 if (!(preempt_count() & PREEMPT_MASK)) { 2385 rcu_preempt_deferred_qs(current); 2386 } else if (rcu_preempt_need_deferred_qs(current)) { 2387 set_tsk_need_resched(current); 2388 set_preempt_need_resched(); 2389 } 2390 2391 /* Update RCU state based on any recent quiescent states. 
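 * This also notices grace-period beginnings and ends by way of the
 * note_gp_changes() call in rcu_check_quiescent_state().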
*/ 2392 rcu_check_quiescent_state(rdp); 2393 2394 /* No grace period and unregistered callbacks? */ 2395 if (!rcu_gp_in_progress() && 2396 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { 2397 local_irq_save(flags); 2398 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2399 rcu_accelerate_cbs_unlocked(rnp, rdp); 2400 local_irq_restore(flags); 2401 } 2402 2403 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2404 2405 /* If there are callbacks ready, invoke them. */ 2406 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && 2407 likely(READ_ONCE(rcu_scheduler_fully_active))) 2408 rcu_do_batch(rdp); 2409 2410 /* Do any needed deferred wakeups of rcuo kthreads. */ 2411 do_nocb_deferred_wakeup(rdp); 2412 trace_rcu_utilization(TPS("End RCU core")); 2413 } 2414 2415 static void rcu_core_si(struct softirq_action *h) 2416 { 2417 rcu_core(); 2418 } 2419 2420 static void rcu_wake_cond(struct task_struct *t, int status) 2421 { 2422 /* 2423 * If the thread is yielding, only wake it when this 2424 * is invoked from idle 2425 */ 2426 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2427 wake_up_process(t); 2428 } 2429 2430 static void invoke_rcu_core_kthread(void) 2431 { 2432 struct task_struct *t; 2433 unsigned long flags; 2434 2435 local_irq_save(flags); 2436 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2437 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2438 if (t != NULL && t != current) 2439 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2440 local_irq_restore(flags); 2441 } 2442 2443 /* 2444 * Wake up this CPU's rcuc kthread to do RCU core processing. 2445 */ 2446 static void invoke_rcu_core(void) 2447 { 2448 if (!cpu_online(smp_processor_id())) 2449 return; 2450 if (use_softirq) 2451 raise_softirq(RCU_SOFTIRQ); 2452 else 2453 invoke_rcu_core_kthread(); 2454 } 2455 2456 static void rcu_cpu_kthread_park(unsigned int cpu) 2457 { 2458 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2459 } 2460 2461 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2462 { 2463 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2464 } 2465 2466 /* 2467 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2468 * the RCU softirq used in configurations of RCU that do not support RCU 2469 * priority boosting. 
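 * These kthreads are spawned only if CONFIG_RCU_BOOST is enabled or if
 * the kernel is booted with rcutree.use_softirq=0; see
 * rcu_spawn_core_kthreads().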
2470 */ 2471 static void rcu_cpu_kthread(unsigned int cpu) 2472 { 2473 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2474 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2475 int spincnt; 2476 2477 for (spincnt = 0; spincnt < 10; spincnt++) { 2478 trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); 2479 local_bh_disable(); 2480 *statusp = RCU_KTHREAD_RUNNING; 2481 local_irq_disable(); 2482 work = *workp; 2483 *workp = 0; 2484 local_irq_enable(); 2485 if (work) 2486 rcu_core(); 2487 local_bh_enable(); 2488 if (*workp == 0) { 2489 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2490 *statusp = RCU_KTHREAD_WAITING; 2491 return; 2492 } 2493 } 2494 *statusp = RCU_KTHREAD_YIELDING; 2495 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2496 schedule_timeout_interruptible(2); 2497 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2498 *statusp = RCU_KTHREAD_WAITING; 2499 } 2500 2501 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2502 .store = &rcu_data.rcu_cpu_kthread_task, 2503 .thread_should_run = rcu_cpu_kthread_should_run, 2504 .thread_fn = rcu_cpu_kthread, 2505 .thread_comm = "rcuc/%u", 2506 .setup = rcu_cpu_kthread_setup, 2507 .park = rcu_cpu_kthread_park, 2508 }; 2509 2510 /* 2511 * Spawn per-CPU RCU core processing kthreads. 2512 */ 2513 static int __init rcu_spawn_core_kthreads(void) 2514 { 2515 int cpu; 2516 2517 for_each_possible_cpu(cpu) 2518 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2519 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq) 2520 return 0; 2521 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2522 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2523 return 0; 2524 } 2525 early_initcall(rcu_spawn_core_kthreads); 2526 2527 /* 2528 * Handle any core-RCU processing required by a call_rcu() invocation. 2529 */ 2530 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2531 unsigned long flags) 2532 { 2533 /* 2534 * If called from an extended quiescent state, invoke the RCU 2535 * core in order to force a re-evaluation of RCU's idleness. 2536 */ 2537 if (!rcu_is_watching()) 2538 invoke_rcu_core(); 2539 2540 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2541 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2542 return; 2543 2544 /* 2545 * Force the grace period if too many callbacks or too long waiting. 2546 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 2547 * if some other CPU has recently done so. Also, don't bother 2548 * invoking rcu_force_quiescent_state() if the newly enqueued callback 2549 * is the only one waiting for a grace period to complete. 2550 */ 2551 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2552 rdp->qlen_last_fqs_check + qhimark)) { 2553 2554 /* Are we ignoring a completed grace period? */ 2555 note_gp_changes(rdp); 2556 2557 /* Start a new grace period if one not already started. */ 2558 if (!rcu_gp_in_progress()) { 2559 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 2560 } else { 2561 /* Give the grace period a kick. */ 2562 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 2563 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && 2564 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2565 rcu_force_quiescent_state(); 2566 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2567 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2568 } 2569 } 2570 } 2571 2572 /* 2573 * RCU callback function to leak a callback. 
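 * Used by __call_rcu() below when a probable double call_rcu() is
 * detected: leaking the callback is preferable to corrupting the
 * callback lists.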
2574 */ 2575 static void rcu_leak_callback(struct rcu_head *rhp) 2576 { 2577 } 2578 2579 /* 2580 * Helper function for call_rcu() and friends. The cpu argument will 2581 * normally be -1, indicating "currently running CPU". It may specify 2582 * a CPU only if that CPU is a no-CBs CPU. Currently, only rcu_barrier() 2583 * is expected to specify a CPU. 2584 */ 2585 static void 2586 __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy) 2587 { 2588 unsigned long flags; 2589 struct rcu_data *rdp; 2590 bool was_alldone; 2591 2592 /* Misaligned rcu_head! */ 2593 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 2594 2595 if (debug_rcu_head_queue(head)) { 2596 /* 2597 * Probable double call_rcu(), so leak the callback. 2598 * Use rcu:rcu_callback trace event to find the previous 2599 * time callback was passed to __call_rcu(). 2600 */ 2601 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", 2602 head, head->func); 2603 WRITE_ONCE(head->func, rcu_leak_callback); 2604 return; 2605 } 2606 head->func = func; 2607 head->next = NULL; 2608 local_irq_save(flags); 2609 rdp = this_cpu_ptr(&rcu_data); 2610 2611 /* Add the callback to our list. */ 2612 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 2613 // This can trigger due to call_rcu() from offline CPU: 2614 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 2615 WARN_ON_ONCE(!rcu_is_watching()); 2616 // Very early boot, before rcu_init(). Initialize if needed 2617 // and then drop through to queue the callback. 2618 if (rcu_segcblist_empty(&rdp->cblist)) 2619 rcu_segcblist_init(&rdp->cblist); 2620 } 2621 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) 2622 return; // Enqueued onto ->nocb_bypass, so just leave. 2623 /* If we get here, rcu_nocb_try_bypass() acquired ->nocb_lock. */ 2624 rcu_segcblist_enqueue(&rdp->cblist, head, lazy); 2625 if (__is_kfree_rcu_offset((unsigned long)func)) 2626 trace_rcu_kfree_callback(rcu_state.name, head, 2627 (unsigned long)func, 2628 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2629 rcu_segcblist_n_cbs(&rdp->cblist)); 2630 else 2631 trace_rcu_callback(rcu_state.name, head, 2632 rcu_segcblist_n_lazy_cbs(&rdp->cblist), 2633 rcu_segcblist_n_cbs(&rdp->cblist)); 2634 2635 /* Go handle any RCU core processing required. */ 2636 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2637 unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { 2638 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ 2639 } else { 2640 __call_rcu_core(rdp, head, flags); 2641 local_irq_restore(flags); 2642 } 2643 } 2644 2645 /** 2646 * call_rcu() - Queue an RCU callback for invocation after a grace period. 2647 * @head: structure to be used for queueing the RCU updates. 2648 * @func: actual callback function to be invoked after the grace period 2649 * 2650 * The callback function will be invoked some time after a full grace 2651 * period elapses, in other words after all pre-existing RCU read-side 2652 * critical sections have completed. However, the callback function 2653 * might well execute concurrently with RCU read-side critical sections 2654 * that started after call_rcu() was invoked. RCU read-side critical 2655 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and 2656 * may be nested. In addition, regions of code across which interrupts, 2657 * preemption, or softirqs have been disabled also serve as RCU read-side 2658 * critical sections. This includes hardware interrupt handlers, softirq 2659 * handlers, and NMI handlers. 
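 *
 * For illustration only (struct foo and foo_reclaim() are made-up names,
 * not definitions from this file), a typical use embeds an rcu_head in
 * the RCU-protected structure and passes its reclaim function to
 * call_rcu() once the structure has been unlinked from all
 * reader-visible paths:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu(&fp->rh, foo_reclaim);
 *
 * where fp points to a struct foo that has already been removed from all
 * reader-visible lists.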
2660 * 2661 * Note that all CPUs must agree that the grace period extended beyond 2662 * all pre-existing RCU read-side critical sections. On systems with more 2663 * than one CPU, this means that when "func()" is invoked, each CPU is 2664 * guaranteed to have executed a full memory barrier since the end of its 2665 * last RCU read-side critical section whose beginning preceded the call 2666 * to call_rcu(). It also means that each CPU executing an RCU read-side 2667 * critical section that continues beyond the start of "func()" must have 2668 * executed a memory barrier after the call_rcu() but before the beginning 2669 * of that RCU read-side critical section. Note that these guarantees 2670 * include CPUs that are offline, idle, or executing in user mode, as 2671 * well as CPUs that are executing in the kernel. 2672 * 2673 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 2674 * resulting RCU callback function "func()", then both CPU A and CPU B are 2675 * guaranteed to execute a full memory barrier during the time interval 2676 * between the call to call_rcu() and the invocation of "func()" -- even 2677 * if CPU A and CPU B are the same CPU (but again only if the system has 2678 * more than one CPU). 2679 */ 2680 void call_rcu(struct rcu_head *head, rcu_callback_t func) 2681 { 2682 __call_rcu(head, func, 0); 2683 } 2684 EXPORT_SYMBOL_GPL(call_rcu); 2685 2686 /* 2687 * Queue an RCU callback for lazy invocation after a grace period. 2688 * This will likely be later named something like "call_rcu_lazy()", 2689 * but this change will require some way of tagging the lazy RCU 2690 * callbacks in the list of pending callbacks. Until then, this 2691 * function may only be called from __kfree_rcu(). 2692 */ 2693 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) 2694 { 2695 __call_rcu(head, func, 1); 2696 } 2697 EXPORT_SYMBOL_GPL(kfree_call_rcu); 2698 2699 /* 2700 * During early boot, any blocking grace-period wait automatically 2701 * implies a grace period. Later on, this is never the case for PREEMPT. 2702 * 2703 * However, because a context switch is a grace period for !PREEMPT, any 2704 * blocking grace-period wait automatically implies a grace period if 2705 * there is only one CPU online at any point in time during execution of 2706 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to 2707 * occasionally incorrectly indicate that there are multiple CPUs online 2708 * when there was in fact only one the whole time, as this just adds some 2709 * overhead: RCU still operates correctly. 2710 */ 2711 static int rcu_blocking_is_gp(void) 2712 { 2713 int ret; 2714 2715 if (IS_ENABLED(CONFIG_PREEMPTION)) 2716 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE; 2717 might_sleep(); /* Check for RCU read-side critical section. */ 2718 preempt_disable(); 2719 ret = num_online_cpus() <= 1; 2720 preempt_enable(); 2721 return ret; 2722 } 2723 2724 /** 2725 * synchronize_rcu - wait until a grace period has elapsed. 2726 * 2727 * Control will return to the caller some time after a full grace 2728 * period has elapsed, in other words after all currently executing RCU 2729 * read-side critical sections have completed. Note, however, that 2730 * upon return from synchronize_rcu(), the caller might well be executing 2731 * concurrently with new RCU read-side critical sections that began while 2732 * synchronize_rcu() was waiting. RCU read-side critical sections are 2733 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
2734 * In addition, regions of code across which interrupts, preemption, or 2735 * softirqs have been disabled also serve as RCU read-side critical 2736 * sections. This includes hardware interrupt handlers, softirq handlers, 2737 * and NMI handlers. 2738 * 2739 * Note that this guarantee implies further memory-ordering guarantees. 2740 * On systems with more than one CPU, when synchronize_rcu() returns, 2741 * each CPU is guaranteed to have executed a full memory barrier since 2742 * the end of its last RCU read-side critical section whose beginning 2743 * preceded the call to synchronize_rcu(). In addition, each CPU having 2744 * an RCU read-side critical section that extends beyond the return from 2745 * synchronize_rcu() is guaranteed to have executed a full memory barrier 2746 * after the beginning of synchronize_rcu() and before the beginning of 2747 * that RCU read-side critical section. Note that these guarantees include 2748 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 2749 * that are executing in the kernel. 2750 * 2751 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 2752 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 2753 * to have executed a full memory barrier during the execution of 2754 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 2755 * again only if the system has more than one CPU). 2756 */ 2757 void synchronize_rcu(void) 2758 { 2759 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 2760 lock_is_held(&rcu_lock_map) || 2761 lock_is_held(&rcu_sched_lock_map), 2762 "Illegal synchronize_rcu() in RCU read-side critical section"); 2763 if (rcu_blocking_is_gp()) 2764 return; 2765 if (rcu_gp_is_expedited()) 2766 synchronize_rcu_expedited(); 2767 else 2768 wait_rcu_gp(call_rcu); 2769 } 2770 EXPORT_SYMBOL_GPL(synchronize_rcu); 2771 2772 /** 2773 * get_state_synchronize_rcu - Snapshot current RCU state 2774 * 2775 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 2776 * to determine whether or not a full grace period has elapsed in the 2777 * meantime. 2778 */ 2779 unsigned long get_state_synchronize_rcu(void) 2780 { 2781 /* 2782 * Any prior manipulation of RCU-protected data must happen 2783 * before the load from ->gp_seq. 2784 */ 2785 smp_mb(); /* ^^^ */ 2786 return rcu_seq_snap(&rcu_state.gp_seq); 2787 } 2788 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 2789 2790 /** 2791 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 2792 * 2793 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 2794 * 2795 * If a full RCU grace period has elapsed since the earlier call to 2796 * get_state_synchronize_rcu(), just return. Otherwise, invoke 2797 * synchronize_rcu() to wait for a full grace period. 2798 * 2799 * Yes, this function does not take counter wrap into account. But 2800 * counter wrap is harmless. If the counter wraps, we have waited for 2801 * more than 2 billion grace periods (and way more on a 64-bit system!), 2802 * so waiting for one additional grace period should be just fine. 2803 */ 2804 void cond_synchronize_rcu(unsigned long oldstate) 2805 { 2806 if (!rcu_seq_done(&rcu_state.gp_seq, oldstate)) 2807 synchronize_rcu(); 2808 else 2809 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 2810 } 2811 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 2812 2813 /* 2814 * Check to see if there is any immediate RCU-related work to be done by 2815 * the current CPU, returning 1 if so and zero otherwise. 
The checks are 2816 * in order of increasing expense: checks that can be carried out against 2817 * CPU-local state are performed first. However, we must check for CPU 2818 * stalls first, else we might not get a chance. 2819 */ 2820 static int rcu_pending(int user) 2821 { 2822 bool gp_in_progress; 2823 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 2824 struct rcu_node *rnp = rdp->mynode; 2825 2826 /* Check for CPU stalls, if enabled. */ 2827 check_cpu_stall(rdp); 2828 2829 /* Does this CPU need a deferred NOCB wakeup? */ 2830 if (rcu_nocb_need_deferred_wakeup(rdp)) 2831 return 1; 2832 2833 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 2834 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu()) 2835 return 0; 2836 2837 /* Is the RCU core waiting for a quiescent state from this CPU? */ 2838 gp_in_progress = rcu_gp_in_progress(); 2839 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 2840 return 1; 2841 2842 /* Does this CPU have callbacks ready to invoke? */ 2843 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2844 return 1; 2845 2846 /* Has RCU gone idle with this CPU needing another grace period? */ 2847 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 2848 (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) || 2849 !rcu_segcblist_is_offloaded(&rdp->cblist)) && 2850 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2851 return 1; 2852 2853 /* Have RCU grace period completed or started? */ 2854 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 2855 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 2856 return 1; 2857 2858 /* nothing to do */ 2859 return 0; 2860 } 2861 2862 /* 2863 * Helper function for rcu_barrier() tracing. If tracing is disabled, 2864 * the compiler is expected to optimize this away. 2865 */ 2866 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 2867 { 2868 trace_rcu_barrier(rcu_state.name, s, cpu, 2869 atomic_read(&rcu_state.barrier_cpu_count), done); 2870 } 2871 2872 /* 2873 * RCU callback function for rcu_barrier(). If we are last, wake 2874 * up the task executing rcu_barrier(). 2875 */ 2876 static void rcu_barrier_callback(struct rcu_head *rhp) 2877 { 2878 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 2879 rcu_barrier_trace(TPS("LastCB"), -1, 2880 rcu_state.barrier_sequence); 2881 complete(&rcu_state.barrier_completion); 2882 } else { 2883 rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence); 2884 } 2885 } 2886 2887 /* 2888 * Called with preemption disabled, and from cross-cpu IRQ context. 2889 */ 2890 static void rcu_barrier_func(void *unused) 2891 { 2892 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2893 2894 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 2895 rdp->barrier_head.func = rcu_barrier_callback; 2896 debug_rcu_head_queue(&rdp->barrier_head); 2897 rcu_nocb_lock(rdp); 2898 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); 2899 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { 2900 atomic_inc(&rcu_state.barrier_cpu_count); 2901 } else { 2902 debug_rcu_head_unqueue(&rdp->barrier_head); 2903 rcu_barrier_trace(TPS("IRQNQ"), -1, 2904 rcu_state.barrier_sequence); 2905 } 2906 rcu_nocb_unlock(rdp); 2907 } 2908 2909 /** 2910 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 2911 * 2912 * Note that this primitive does not necessarily wait for an RCU grace period 2913 * to complete. 
For example, if there are no RCU callbacks queued anywhere 2914 * in the system, then rcu_barrier() is within its rights to return 2915 * immediately, without waiting for anything, much less an RCU grace period. 2916 */ 2917 void rcu_barrier(void) 2918 { 2919 int cpu; 2920 struct rcu_data *rdp; 2921 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 2922 2923 rcu_barrier_trace(TPS("Begin"), -1, s); 2924 2925 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 2926 mutex_lock(&rcu_state.barrier_mutex); 2927 2928 /* Did someone else do our work for us? */ 2929 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 2930 rcu_barrier_trace(TPS("EarlyExit"), -1, 2931 rcu_state.barrier_sequence); 2932 smp_mb(); /* caller's subsequent code after above check. */ 2933 mutex_unlock(&rcu_state.barrier_mutex); 2934 return; 2935 } 2936 2937 /* Mark the start of the barrier operation. */ 2938 rcu_seq_start(&rcu_state.barrier_sequence); 2939 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 2940 2941 /* 2942 * Initialize the count to one rather than to zero in order to 2943 * avoid a too-soon return to zero in case of a short grace period 2944 * (or preemption of this task). Exclude CPU-hotplug operations 2945 * to ensure that no offline CPU has callbacks queued. 2946 */ 2947 init_completion(&rcu_state.barrier_completion); 2948 atomic_set(&rcu_state.barrier_cpu_count, 1); 2949 get_online_cpus(); 2950 2951 /* 2952 * Force each CPU with callbacks to register a new callback. 2953 * When that callback is invoked, we will know that all of the 2954 * corresponding CPU's preceding callbacks have been invoked. 2955 */ 2956 for_each_possible_cpu(cpu) { 2957 rdp = per_cpu_ptr(&rcu_data, cpu); 2958 if (!cpu_online(cpu) && 2959 !rcu_segcblist_is_offloaded(&rdp->cblist)) 2960 continue; 2961 if (rcu_segcblist_n_cbs(&rdp->cblist)) { 2962 rcu_barrier_trace(TPS("OnlineQ"), cpu, 2963 rcu_state.barrier_sequence); 2964 smp_call_function_single(cpu, rcu_barrier_func, NULL, 1); 2965 } else { 2966 rcu_barrier_trace(TPS("OnlineNQ"), cpu, 2967 rcu_state.barrier_sequence); 2968 } 2969 } 2970 put_online_cpus(); 2971 2972 /* 2973 * Now that we have an rcu_barrier_callback() callback on each 2974 * CPU, and thus each counted, remove the initial count. 2975 */ 2976 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) 2977 complete(&rcu_state.barrier_completion); 2978 2979 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 2980 wait_for_completion(&rcu_state.barrier_completion); 2981 2982 /* Mark the end of the barrier operation. */ 2983 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 2984 rcu_seq_end(&rcu_state.barrier_sequence); 2985 2986 /* Other rcu_barrier() invocations can now safely proceed. */ 2987 mutex_unlock(&rcu_state.barrier_mutex); 2988 } 2989 EXPORT_SYMBOL_GPL(rcu_barrier); 2990 2991 /* 2992 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the 2993 * first CPU in a given leaf rcu_node structure coming online. The caller 2994 * must hold the corresponding leaf rcu_node ->lock with interrupts 2995 * disabled. 2996 */ 2997 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) 2998 { 2999 long mask; 3000 long oldmask; 3001 struct rcu_node *rnp = rnp_leaf; 3002 3003 raw_lockdep_assert_held_rcu_node(rnp_leaf); 3004 WARN_ON_ONCE(rnp->wait_blkd_tasks); 3005 for (;;) { 3006 mask = rnp->grpmask; 3007 rnp = rnp->parent; 3008 if (rnp == NULL) 3009 return; 3010 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled.
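 * The walk terminates as soon as a parent already had its bit set
 * (oldmask non-zero below), since all higher levels must then already
 * account for this leaf.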
*/ 3011 oldmask = rnp->qsmaskinit; 3012 rnp->qsmaskinit |= mask; 3013 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ 3014 if (oldmask) 3015 return; 3016 } 3017 } 3018 3019 /* 3020 * Do boot-time initialization of a CPU's per-CPU RCU data. 3021 */ 3022 static void __init 3023 rcu_boot_init_percpu_data(int cpu) 3024 { 3025 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3026 3027 /* Set up local state, ensuring consistent view of global state. */ 3028 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); 3029 WARN_ON_ONCE(rdp->dynticks_nesting != 1); 3030 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); 3031 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; 3032 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; 3033 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; 3034 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; 3035 rdp->cpu = cpu; 3036 rcu_boot_init_nocb_percpu_data(rdp); 3037 } 3038 3039 /* 3040 * Invoked early in the CPU-online process, when pretty much all services 3041 * are available. The incoming CPU is not present. 3042 * 3043 * Initializes a CPU's per-CPU RCU data. Note that only one online or 3044 * offline event can be happening at a given time. Note also that we can 3045 * accept some slop in the rsp->gp_seq access due to the fact that this 3046 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 3047 * And any offloaded callbacks are being numbered elsewhere. 3048 */ 3049 int rcutree_prepare_cpu(unsigned int cpu) 3050 { 3051 unsigned long flags; 3052 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3053 struct rcu_node *rnp = rcu_get_root(); 3054 3055 /* Set up local state, ensuring consistent view of global state. */ 3056 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3057 rdp->qlen_last_fqs_check = 0; 3058 rdp->n_force_qs_snap = rcu_state.n_force_qs; 3059 rdp->blimit = blimit; 3060 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ 3061 !rcu_segcblist_is_offloaded(&rdp->cblist)) 3062 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 3063 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ 3064 rcu_dynticks_eqs_online(); 3065 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 3066 3067 /* 3068 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 3069 * propagation up the rcu_node tree will happen at the beginning 3070 * of the next grace period. 3071 */ 3072 rnp = rdp->mynode; 3073 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 3074 rdp->beenonline = true; /* We have now been online. */ 3075 rdp->gp_seq = rnp->gp_seq; 3076 rdp->gp_seq_needed = rnp->gp_seq; 3077 rdp->cpu_no_qs.b.norm = true; 3078 rdp->core_needs_qs = false; 3079 rdp->rcu_iw_pending = false; 3080 rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; 3081 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 3082 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3083 rcu_prepare_kthreads(cpu); 3084 rcu_spawn_cpu_nocb_kthread(cpu); 3085 3086 return 0; 3087 } 3088 3089 /* 3090 * Update RCU priority boot kthread affinity for CPU-hotplug changes. 3091 */ 3092 static void rcutree_affinity_setting(unsigned int cpu, int outgoing) 3093 { 3094 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3095 3096 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); 3097 } 3098 3099 /* 3100 * Near the end of the CPU-online process. Pretty much all services 3101 * enabled, and the CPU is now very much alive. 
3102 */ 3103 int rcutree_online_cpu(unsigned int cpu) 3104 { 3105 unsigned long flags; 3106 struct rcu_data *rdp; 3107 struct rcu_node *rnp; 3108 3109 rdp = per_cpu_ptr(&rcu_data, cpu); 3110 rnp = rdp->mynode; 3111 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3112 rnp->ffmask |= rdp->grpmask; 3113 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3114 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 3115 return 0; /* Too early in boot for scheduler work. */ 3116 sync_sched_exp_online_cleanup(cpu); 3117 rcutree_affinity_setting(cpu, -1); 3118 3119 // Stop-machine done, so allow nohz_full to disable tick. 3120 tick_dep_clear(TICK_DEP_BIT_RCU); 3121 return 0; 3122 } 3123 3124 /* 3125 * Near the beginning of the CPU-offline process. The CPU is still very much alive 3126 * with pretty much all services enabled. 3127 */ 3128 int rcutree_offline_cpu(unsigned int cpu) 3129 { 3130 unsigned long flags; 3131 struct rcu_data *rdp; 3132 struct rcu_node *rnp; 3133 3134 rdp = per_cpu_ptr(&rcu_data, cpu); 3135 rnp = rdp->mynode; 3136 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3137 rnp->ffmask &= ~rdp->grpmask; 3138 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3139 3140 rcutree_affinity_setting(cpu, cpu); 3141 3142 // nohz_full CPUs need the tick for stop-machine to work quickly 3143 tick_dep_set(TICK_DEP_BIT_RCU); 3144 return 0; 3145 } 3146 3147 static DEFINE_PER_CPU(int, rcu_cpu_started); 3148 3149 /* 3150 * Mark the specified CPU as being online so that subsequent grace periods 3151 * (both expedited and normal) will wait on it. Note that this means that 3152 * incoming CPUs are not allowed to use RCU read-side critical sections 3153 * until this function is called. Failing to observe this restriction 3154 * will result in lockdep splats. 3155 * 3156 * Note that this function is special in that it is invoked directly 3157 * from the incoming CPU rather than from the cpuhp_step mechanism. 3158 * This is because this function must be invoked at a precise location. 3159 */ 3160 void rcu_cpu_starting(unsigned int cpu) 3161 { 3162 unsigned long flags; 3163 unsigned long mask; 3164 int nbits; 3165 unsigned long oldmask; 3166 struct rcu_data *rdp; 3167 struct rcu_node *rnp; 3168 3169 if (per_cpu(rcu_cpu_started, cpu)) 3170 return; 3171 3172 per_cpu(rcu_cpu_started, cpu) = 1; 3173 3174 rdp = per_cpu_ptr(&rcu_data, cpu); 3175 rnp = rdp->mynode; 3176 mask = rdp->grpmask; 3177 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3178 rnp->qsmaskinitnext |= mask; 3179 oldmask = rnp->expmaskinitnext; 3180 rnp->expmaskinitnext |= mask; 3181 oldmask ^= rnp->expmaskinitnext; 3182 nbits = bitmap_weight(&oldmask, BITS_PER_LONG); 3183 /* Allow lockless access for expedited grace periods. */ 3184 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */ 3185 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ 3186 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); 3187 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); 3188 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ 3189 rcu_disable_urgency_upon_qs(rdp); 3190 /* Report QS -after- changing ->qsmaskinitnext! */ 3191 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 3192 } else { 3193 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3194 } 3195 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 3196 } 3197 3198 #ifdef CONFIG_HOTPLUG_CPU 3199 /* 3200 * The outgoing CPU has no further need of RCU, so remove it from 3201 * the rcu_node tree's ->qsmaskinitnext bit masks.
3202 * 3203 * Note that this function is special in that it is invoked directly 3204 * from the outgoing CPU rather than from the cpuhp_step mechanism. 3205 * This is because this function must be invoked at a precise location. 3206 */ 3207 void rcu_report_dead(unsigned int cpu) 3208 { 3209 unsigned long flags; 3210 unsigned long mask; 3211 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3212 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 3213 3214 /* QS for any half-done expedited grace period. */ 3215 preempt_disable(); 3216 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); 3217 preempt_enable(); 3218 rcu_preempt_deferred_qs(current); 3219 3220 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 3221 mask = rdp->grpmask; 3222 raw_spin_lock(&rcu_state.ofl_lock); 3223 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 3224 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 3225 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); 3226 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 3227 /* Report quiescent state -before- changing ->qsmaskinitnext! */ 3228 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 3229 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3230 } 3231 rnp->qsmaskinitnext &= ~mask; 3232 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3233 raw_spin_unlock(&rcu_state.ofl_lock); 3234 3235 per_cpu(rcu_cpu_started, cpu) = 0; 3236 } 3237 3238 /* 3239 * The outgoing CPU has just passed through the dying-idle state, and we 3240 * are being invoked from the CPU that was IPIed to continue the offline 3241 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 3242 */ 3243 void rcutree_migrate_callbacks(int cpu) 3244 { 3245 unsigned long flags; 3246 struct rcu_data *my_rdp; 3247 struct rcu_node *my_rnp; 3248 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3249 bool needwake; 3250 3251 if (rcu_segcblist_is_offloaded(&rdp->cblist) || 3252 rcu_segcblist_empty(&rdp->cblist)) 3253 return; /* No callbacks to migrate. */ 3254 3255 local_irq_save(flags); 3256 my_rdp = this_cpu_ptr(&rcu_data); 3257 my_rnp = my_rdp->mynode; 3258 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 3259 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies)); 3260 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 3261 /* Leverage recent GPs and set GP for new callbacks. */ 3262 needwake = rcu_advance_cbs(my_rnp, rdp) || 3263 rcu_advance_cbs(my_rnp, my_rdp); 3264 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 3265 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 3266 rcu_segcblist_disable(&rdp->cblist); 3267 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != 3268 !rcu_segcblist_n_cbs(&my_rdp->cblist)); 3269 if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) { 3270 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 3271 __call_rcu_nocb_wake(my_rdp, true, flags); 3272 } else { 3273 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ 3274 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags); 3275 } 3276 if (needwake) 3277 rcu_gp_kthread_wake(); 3278 lockdep_assert_irqs_enabled(); 3279 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 3280 !rcu_segcblist_empty(&rdp->cblist), 3281 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 3282 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 3283 rcu_segcblist_first_cb(&rdp->cblist)); 3284 } 3285 #endif 3286 3287 /* 3288 * On non-huge systems, use expedited RCU grace periods to make suspend 3289 * and hibernation run faster. 
3290 */ 3291 static int rcu_pm_notify(struct notifier_block *self, 3292 unsigned long action, void *hcpu) 3293 { 3294 switch (action) { 3295 case PM_HIBERNATION_PREPARE: 3296 case PM_SUSPEND_PREPARE: 3297 rcu_expedite_gp(); 3298 break; 3299 case PM_POST_HIBERNATION: 3300 case PM_POST_SUSPEND: 3301 rcu_unexpedite_gp(); 3302 break; 3303 default: 3304 break; 3305 } 3306 return NOTIFY_OK; 3307 } 3308 3309 /* 3310 * Spawn the kthreads that handle RCU's grace periods. 3311 */ 3312 static int __init rcu_spawn_gp_kthread(void) 3313 { 3314 unsigned long flags; 3315 int kthread_prio_in = kthread_prio; 3316 struct rcu_node *rnp; 3317 struct sched_param sp; 3318 struct task_struct *t; 3319 3320 /* Force priority into range. */ 3321 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2 3322 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) 3323 kthread_prio = 2; 3324 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 3325 kthread_prio = 1; 3326 else if (kthread_prio < 0) 3327 kthread_prio = 0; 3328 else if (kthread_prio > 99) 3329 kthread_prio = 99; 3330 3331 if (kthread_prio != kthread_prio_in) 3332 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", 3333 kthread_prio, kthread_prio_in); 3334 3335 rcu_scheduler_fully_active = 1; 3336 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 3337 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 3338 return 0; 3339 if (kthread_prio) { 3340 sp.sched_priority = kthread_prio; 3341 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 3342 } 3343 rnp = rcu_get_root(); 3344 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3345 rcu_state.gp_kthread = t; 3346 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3347 wake_up_process(t); 3348 rcu_spawn_nocb_kthreads(); 3349 rcu_spawn_boost_kthreads(); 3350 return 0; 3351 } 3352 early_initcall(rcu_spawn_gp_kthread); 3353 3354 /* 3355 * This function is invoked towards the end of the scheduler's 3356 * initialization process. Before this is called, the idle task might 3357 * contain synchronous grace-period primitives (during which time, this idle 3358 * task is booting the system, and such primitives are no-ops). After this 3359 * function is called, any synchronous grace-period primitives are run as 3360 * expedited, with the requesting task driving the grace period forward. 3361 * A later core_initcall() rcu_set_runtime_mode() will switch to full 3362 * runtime RCU functionality. 3363 */ 3364 void rcu_scheduler_starting(void) 3365 { 3366 WARN_ON(num_online_cpus() != 1); 3367 WARN_ON(nr_context_switches() > 0); 3368 rcu_test_sync_prims(); 3369 rcu_scheduler_active = RCU_SCHEDULER_INIT; 3370 rcu_test_sync_prims(); 3371 } 3372 3373 /* 3374 * Helper function for rcu_init() that initializes the rcu_state structure. 3375 */ 3376 static void __init rcu_init_one(void) 3377 { 3378 static const char * const buf[] = RCU_NODE_NAME_INIT; 3379 static const char * const fqs[] = RCU_FQS_NAME_INIT; 3380 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 3381 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 3382 3383 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 3384 int cpustride = 1; 3385 int i; 3386 int j; 3387 struct rcu_node *rnp; 3388 3389 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 3390 3391 /* Silence gcc 4.8 false positive about array index out of range. 
/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
        static const char * const buf[] = RCU_NODE_NAME_INIT;
        static const char * const fqs[] = RCU_FQS_NAME_INIT;
        static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
        static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

        int levelspread[RCU_NUM_LVLS];  /* kids/node in each level. */
        int cpustride = 1;
        int i;
        int j;
        struct rcu_node *rnp;

        BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

        /* Silence gcc 4.8 false positive about array index out of range. */
        if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
                panic("rcu_init_one: rcu_num_lvls out of range");

        /* Initialize the level-tracking arrays. */

        for (i = 1; i < rcu_num_lvls; i++)
                rcu_state.level[i] =
                        rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
        rcu_init_levelspread(levelspread, num_rcu_lvl);

        /* Initialize the elements themselves, starting from the leaves. */

        for (i = rcu_num_lvls - 1; i >= 0; i--) {
                cpustride *= levelspread[i];
                rnp = rcu_state.level[i];
                for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
                        raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
                        lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
                                                   &rcu_node_class[i], buf[i]);
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,
                                                   &rcu_fqs_class[i], fqs[i]);
                        rnp->gp_seq = rcu_state.gp_seq;
                        rnp->gp_seq_needed = rcu_state.gp_seq;
                        rnp->completedqs = rcu_state.gp_seq;
                        rnp->qsmask = 0;
                        rnp->qsmaskinit = 0;
                        rnp->grplo = j * cpustride;
                        rnp->grphi = (j + 1) * cpustride - 1;
                        if (rnp->grphi >= nr_cpu_ids)
                                rnp->grphi = nr_cpu_ids - 1;
                        if (i == 0) {
                                rnp->grpnum = 0;
                                rnp->grpmask = 0;
                                rnp->parent = NULL;
                        } else {
                                rnp->grpnum = j % levelspread[i - 1];
                                rnp->grpmask = BIT(rnp->grpnum);
                                rnp->parent = rcu_state.level[i - 1] +
                                              j / levelspread[i - 1];
                        }
                        rnp->level = i;
                        INIT_LIST_HEAD(&rnp->blkd_tasks);
                        rcu_init_one_nocb(rnp);
                        init_waitqueue_head(&rnp->exp_wq[0]);
                        init_waitqueue_head(&rnp->exp_wq[1]);
                        init_waitqueue_head(&rnp->exp_wq[2]);
                        init_waitqueue_head(&rnp->exp_wq[3]);
                        spin_lock_init(&rnp->exp_lock);
                }
        }

        init_swait_queue_head(&rcu_state.gp_wq);
        init_swait_queue_head(&rcu_state.expedited_wq);
        rnp = rcu_first_leaf_node();
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
                        rnp++;
                per_cpu_ptr(&rcu_data, i)->mynode = rnp;
                rcu_boot_init_percpu_data(i);
        }
}

/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
        ulong d;
        int i;
        int rcu_capacity[RCU_NUM_LVLS];

        /*
         * Initialize any unspecified boot parameters.
         * The default values of jiffies_till_first_fqs and
         * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
         * value (a function of HZ), plus one for each RCU_JIFFIES_FQS_DIV
         * CPUs that might be on the system.
         */
        d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
        if (jiffies_till_first_fqs == ULONG_MAX)
                jiffies_till_first_fqs = d;
        if (jiffies_till_next_fqs == ULONG_MAX)
                jiffies_till_next_fqs = d;
        adjust_jiffies_till_sched_qs();

        /* If the compile-time values are accurate, just leave. */
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
        pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
                rcu_fanout_leaf, nr_cpu_ids);
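        /*
         * A worked example of the computation below, under assumed values
         * (rcu_fanout_leaf = 16, RCU_FANOUT = 64, nr_cpu_ids = 100; these
         * are illustrative, not defaults being asserted here):
         *
         *      rcu_capacity[0] = 16;           // leaf level alone
         *      rcu_capacity[1] = 16 * 64;      // leaves plus one interior level
         *      rcu_num_lvls    = 2;            // 100 > 16 but 100 <= 1024
         *      num_rcu_lvl[0]  = DIV_ROUND_UP(100, 1024) = 1;  // root
         *      num_rcu_lvl[1]  = DIV_ROUND_UP(100, 16)   = 7;  // leaves
         *      rcu_num_nodes   = 8;
         */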
3492 */ 3493 if (rcu_fanout_leaf < 2 || 3494 rcu_fanout_leaf > sizeof(unsigned long) * 8) { 3495 rcu_fanout_leaf = RCU_FANOUT_LEAF; 3496 WARN_ON(1); 3497 return; 3498 } 3499 3500 /* 3501 * Compute number of nodes that can be handled an rcu_node tree 3502 * with the given number of levels. 3503 */ 3504 rcu_capacity[0] = rcu_fanout_leaf; 3505 for (i = 1; i < RCU_NUM_LVLS; i++) 3506 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; 3507 3508 /* 3509 * The tree must be able to accommodate the configured number of CPUs. 3510 * If this limit is exceeded, fall back to the compile-time values. 3511 */ 3512 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { 3513 rcu_fanout_leaf = RCU_FANOUT_LEAF; 3514 WARN_ON(1); 3515 return; 3516 } 3517 3518 /* Calculate the number of levels in the tree. */ 3519 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { 3520 } 3521 rcu_num_lvls = i + 1; 3522 3523 /* Calculate the number of rcu_nodes at each level of the tree. */ 3524 for (i = 0; i < rcu_num_lvls; i++) { 3525 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; 3526 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); 3527 } 3528 3529 /* Calculate the total number of rcu_node structures. */ 3530 rcu_num_nodes = 0; 3531 for (i = 0; i < rcu_num_lvls; i++) 3532 rcu_num_nodes += num_rcu_lvl[i]; 3533 } 3534 3535 /* 3536 * Dump out the structure of the rcu_node combining tree associated 3537 * with the rcu_state structure. 3538 */ 3539 static void __init rcu_dump_rcu_node_tree(void) 3540 { 3541 int level = 0; 3542 struct rcu_node *rnp; 3543 3544 pr_info("rcu_node tree layout dump\n"); 3545 pr_info(" "); 3546 rcu_for_each_node_breadth_first(rnp) { 3547 if (rnp->level != level) { 3548 pr_cont("\n"); 3549 pr_info(" "); 3550 level = rnp->level; 3551 } 3552 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 3553 } 3554 pr_cont("\n"); 3555 } 3556 3557 struct workqueue_struct *rcu_gp_wq; 3558 struct workqueue_struct *rcu_par_gp_wq; 3559 3560 void __init rcu_init(void) 3561 { 3562 int cpu; 3563 3564 rcu_early_boot_tests(); 3565 3566 rcu_bootup_announce(); 3567 rcu_init_geometry(); 3568 rcu_init_one(); 3569 if (dump_tree) 3570 rcu_dump_rcu_node_tree(); 3571 if (use_softirq) 3572 open_softirq(RCU_SOFTIRQ, rcu_core_si); 3573 3574 /* 3575 * We don't need protection against CPU-hotplug here because 3576 * this is called early in boot, before either interrupts 3577 * or the scheduler are operational. 3578 */ 3579 pm_notifier(rcu_pm_notify, 0); 3580 for_each_online_cpu(cpu) { 3581 rcutree_prepare_cpu(cpu); 3582 rcu_cpu_starting(cpu); 3583 rcutree_online_cpu(cpu); 3584 } 3585 3586 /* Create workqueue for expedited GPs and for Tree SRCU. */ 3587 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); 3588 WARN_ON(!rcu_gp_wq); 3589 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); 3590 WARN_ON(!rcu_par_gp_wq); 3591 srcu_init(); 3592 } 3593 3594 #include "tree_stall.h" 3595 #include "tree_exp.h" 3596 #include "tree_plugin.h" 3597