// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

#ifndef data_race
#define data_race(expr)							\
	({								\
		expr;							\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = true;
module_param(use_softirq, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.
 * The reason for this normalization is that it means that, for non-zero
 * delays, the overall slowdown of grace periods is constant regardless
 * of the duration of the delay.  This arrangement balances the need for
 * long delays to increase some race probabilities with the need for fast
 * grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	int snap = atomic_add_return(0, &rdp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
					       RCU_DYNTICK_CTRL_CTR);

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure;
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
}
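
/*
 * A rough sketch of how the helpers above fit together, for orientation
 * (illustrative only; the definitions above are authoritative): bit 0 of
 * ->dynticks (RCU_DYNTICK_CTRL_MASK) requests special action (such as a
 * TLB flush) on the next EQS exit, and the remaining bits form a counter
 * bumped by RCU_DYNTICK_CTRL_CTR at every EQS entry and exit.  Thus:
 *
 *	int snap = rcu_dynticks_snap(rdp);	// masked counter bits
 *	if (rcu_dynticks_in_eqs(snap))
 *		// low counter bit clear: CPU was idle at snapshot time
 *	if (rcu_dynticks_in_eqs_since(rdp, snap))
 *		// counter changed: CPU has been idle at some point since
 *
 * Any change in the counter implies at least one EQS transition, so the
 * inequality test in rcu_dynticks_in_eqs_since() is wrap-tolerant.
 */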

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	int new_old;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	new_old = atomic_read(&rdp->dynticks);
	do {
		old = new_old;
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
	} while (new_old != old);
	return true;
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
void rcu_momentary_dyntick_idle(void)
{
	int special;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
				    &this_cpu_ptr(&rcu_data)->dynticks);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is running from idle, either directly or from a
 * first-level (not nested) interrupt, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods.  This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch ... */
#define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; /* If this many pending, hammer QS. */
static long qovld_calc = -1;	  /* No pre-initialization lock acquisitions! */
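
/*
 * For illustration only: because MODULE_PARAM_PREFIX is "rcutree." (see
 * the top of this file), the knobs defined here and just below can be set
 * on the kernel command line, for example:
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qovld=40000
 *
 * The available parameters and their defaults can differ across kernel
 * versions, so treat these particular values as examples rather than
 * tuning advice.
 */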

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
	    2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
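
/*
 * A rough sketch of the ->gp_seq encoding assumed by the accessors above
 * and below (the authoritative definitions live in kernel/rcu/rcu.h):
 * the low-order bits selected by RCU_SEQ_STATE_MASK record whether a
 * grace period is currently in progress, and the remaining high-order
 * bits count grace periods.  So, approximately:
 *
 *	rcu_seq_ctr(s)   == s >> RCU_SEQ_CTR_SHIFT;	// completed GPs
 *	rcu_seq_state(s) == s & RCU_SEQ_STATE_MASK;	// nonzero: GP active
 *
 * which is why rcu_gp_in_progress() above needs only rcu_seq_state() and
 * why rcu_get_gp_seq() returns the raw sequence value for debug purposes.
 */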

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rdp = this_cpu_ptr(&rcu_data);
	do_nocb_deferred_wakeup(rdp);
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}
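
/*
 * A rough map of the two nesting counters used above, for orientation
 * (illustrative approximation; rcu_eqs_enter()/rcu_eqs_exit() and
 * rcu_nmi_enter()/rcu_nmi_exit() are authoritative): ->dynticks_nesting
 * counts process-level reasons for RCU to watch this CPU, while
 * ->dynticks_nmi_nesting tracks irq/NMI nesting on top of that, biased
 * by the large DYNTICK_IRQ_NONIDLE constant (see the RCU-internal headers)
 * so that irq exits alone can never drive it to zero while the CPU is
 * non-idle.  Typical values:
 *
 *	idle (or nohz_full user), no irq: nesting == 0, nmi_nesting == 0
 *	idle, in irq:			  nesting == 0, nmi_nesting == 1
 *	non-idle, no irq:		  nesting >= 1, nmi_nesting == DYNTICK_IRQ_NONIDLE
 *	non-idle, in irq:		  nesting >= 1, nmi_nesting == DYNTICK_IRQ_NONIDLE + 2
 */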

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (!in_nmi())
		rcu_prepare_for_idle();

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}

/**
 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
 *			  towards in-kernel preemption
 *
 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
 * from RCU point of view.  Invoked from return from interrupt before kernel
 * preemption.
 */
void rcu_irq_exit_preempt(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// Enabling the tick is unsafe in NMI handlers.
	if (WARN_ON_ONCE(in_nmi()))
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */
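
/*
 * To summarize the handshake above (a sketch, not a normative statement
 * of the protocol): the grace-period kthread sets ->rcu_urgent_qs for a
 * holdout nohz_full CPU; the next irq or exception on that CPU lands in
 * __rcu_irq_enter_check_tick(), which rechecks the flag under the leaf
 * rcu_node lock and, if it is still set, records ->rcu_forced_tick and
 * calls tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU).  Once the CPU reports a
 * quiescent state, rcu_disable_urgency_upon_qs() (later in this file)
 * clears the dependency again via tick_dep_clear_cpu().
 */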

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (!in_nmi())
			rcu_cleanup_after_idle();

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter();
}
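
/*
 * For illustration, assuming the increments described in rcu_nmi_enter()
 * and the decrements in rcu_nmi_exit(), a CPU that was RCU-idle and then
 * takes an interrupt which is itself interrupted by an NMI would see
 * ->dynticks_nmi_nesting move roughly as follows:
 *
 *	idle			0
 *	irq entry (from EQS)	0 -> 1	(incby == 1, RCU starts watching)
 *	NMI entry (nested)	1 -> 3	(incby == 2)
 *	NMI exit		3 -> 1
 *	irq exit		1 -> 0	(RCU stops watching again)
 *
 * The value 1 therefore uniquely identifies "outermost handler that
 * interrupted an RCU-idle period", which is what rcu_nmi_exit() tests.
 */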

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

noinstr bool __rcu_is_watching(void)
{
	return !rcu_dynticks_curr_cpu_in_eqs();
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 */
bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}
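
/*
 * A typical use of rcu_is_watching() outside this file, sketched here for
 * context (exact call sites vary by kernel version): code that might run
 * from the idle loop or from early entry paths guards its RCU usage with
 * something like
 *
 *	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 *			 "RCU used illegally from extended quiescent state!");
 *
 * before entering an rcu_read_lock() section, so that lockdep flags
 * read-side critical sections on CPUs that RCU is not watching.
 */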

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}
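
/*
 * A short aside on the ULONG_CMP_LT()/ULONG_CMP_GE() helpers used in
 * rcu_gpnum_ovf() and throughout this file: they compare sequence numbers
 * modulo ULONG_MAX + 1, so they keep working when ->gp_seq wraps.  As an
 * illustration of the intent (the real macros live in kernel/rcu/rcu.h):
 *
 *	ULONG_CMP_GE(a, b)  roughly means  (long)(a - b) >= 0
 *	ULONG_CMP_LT(a, b)  roughly means  (long)(a - b) < 0
 *
 * so a freshly wrapped rnp->gp_seq still compares as "after" an older
 * rdp->gp_seq, and rcu_gpnum_ovf() only sets ->gpwrap when the rcu_data
 * value has fallen more than ULONG_MAX / 4 behind.
 */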

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/* If waiting too long on an offline CPU, complain. */
	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
	    time_after(jiffies, rcu_state.gp_start + HZ)) {
		bool onl;
		struct rcu_node *rnp1;

		WARN_ON(1); /* Offline CPUs are supposed to report QS! */
		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(*ruqp, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*ruqp, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
			atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}

/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP thread needs to be awakened else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_irq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}
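
/*
 * To make the funnel locking in rcu_start_this_gp() a bit more concrete,
 * here is a sketch of the common case (illustrative only): a CPU needing
 * a future grace period computes gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq),
 * records it in its leaf rcu_node structure's ->gp_seq_needed while holding
 * that leaf's ->lock, and walks toward the root only as far as the first
 * rcu_node structure that already has an equal or later request recorded.
 * Most requests therefore stop at the leaf, and only the first requester
 * for a given grace period reaches the root and sets RCU_GP_FLAG_INIT,
 * which is what keeps this path from becoming a global lock bottleneck.
 */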

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
	return ret;
}

/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}
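
/*
 * The acceleration and advancement helpers above and below operate on the
 * per-CPU segmented callback list, whose segments are (see
 * include/linux/rcu_segcblist.h for the authoritative definitions):
 *
 *	RCU_DONE_TAIL:       callbacks whose grace period has ended,
 *	                     ready to be invoked by rcu_do_batch();
 *	RCU_WAIT_TAIL:       callbacks waiting for the current grace period;
 *	RCU_NEXT_READY_TAIL: callbacks waiting for the next grace period;
 *	RCU_NEXT_TAIL:       callbacks not yet associated with a grace period.
 *
 * Roughly speaking, rcu_segcblist_accelerate() assigns a ->gp_seq number
 * to callbacks in the later segments, and rcu_segcblist_advance() moves
 * callbacks whose assigned grace period has completed into RCU_DONE_TAIL.
 */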
1540 */ 1541 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1542 1543 /* Classify any remaining callbacks. */ 1544 return rcu_accelerate_cbs(rnp, rdp); 1545 } 1546 1547 /* 1548 * Move and classify callbacks, but only if doing so won't require 1549 * that the RCU grace-period kthread be awakened. 1550 */ 1551 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1552 struct rcu_data *rdp) 1553 { 1554 rcu_lockdep_assert_cblist_protected(rdp); 1555 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || 1556 !raw_spin_trylock_rcu_node(rnp)) 1557 return; 1558 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1559 raw_spin_unlock_rcu_node(rnp); 1560 } 1561 1562 /* 1563 * Update CPU-local rcu_data state to record the beginnings and ends of 1564 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1565 * structure corresponding to the current CPU, and must have irqs disabled. 1566 * Returns true if the grace-period kthread needs to be awakened. 1567 */ 1568 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1569 { 1570 bool ret = false; 1571 bool need_qs; 1572 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 1573 rcu_segcblist_is_offloaded(&rdp->cblist); 1574 1575 raw_lockdep_assert_held_rcu_node(rnp); 1576 1577 if (rdp->gp_seq == rnp->gp_seq) 1578 return false; /* Nothing to do. */ 1579 1580 /* Handle the ends of any preceding grace periods first. */ 1581 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1582 unlikely(READ_ONCE(rdp->gpwrap))) { 1583 if (!offloaded) 1584 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ 1585 rdp->core_needs_qs = false; 1586 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1587 } else { 1588 if (!offloaded) 1589 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ 1590 if (rdp->core_needs_qs) 1591 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); 1592 } 1593 1594 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ 1595 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1596 unlikely(READ_ONCE(rdp->gpwrap))) { 1597 /* 1598 * If the current grace period is waiting for this CPU, 1599 * set up to detect a quiescent state, otherwise don't 1600 * go looking for one. 1601 */ 1602 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1603 need_qs = !!(rnp->qsmask & rdp->grpmask); 1604 rdp->cpu_no_qs.b.norm = need_qs; 1605 rdp->core_needs_qs = need_qs; 1606 zero_cpu_stall_ticks(rdp); 1607 } 1608 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1609 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1610 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1611 WRITE_ONCE(rdp->gpwrap, false); 1612 rcu_gpnum_ovf(rnp, rdp); 1613 return ret; 1614 } 1615 1616 static void note_gp_changes(struct rcu_data *rdp) 1617 { 1618 unsigned long flags; 1619 bool needwake; 1620 struct rcu_node *rnp; 1621 1622 local_irq_save(flags); 1623 rnp = rdp->mynode; 1624 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1625 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1626 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. 
*/ 1627 local_irq_restore(flags); 1628 return; 1629 } 1630 needwake = __note_gp_changes(rnp, rdp); 1631 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1632 if (needwake) 1633 rcu_gp_kthread_wake(); 1634 } 1635 1636 static void rcu_gp_slow(int delay) 1637 { 1638 if (delay > 0 && 1639 !(rcu_seq_ctr(rcu_state.gp_seq) % 1640 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1641 schedule_timeout_uninterruptible(delay); 1642 } 1643 1644 static unsigned long sleep_duration; 1645 1646 /* Allow rcutorture to stall the grace-period kthread. */ 1647 void rcu_gp_set_torture_wait(int duration) 1648 { 1649 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0) 1650 WRITE_ONCE(sleep_duration, duration); 1651 } 1652 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait); 1653 1654 /* Actually implement the aforementioned wait. */ 1655 static void rcu_gp_torture_wait(void) 1656 { 1657 unsigned long duration; 1658 1659 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST)) 1660 return; 1661 duration = xchg(&sleep_duration, 0UL); 1662 if (duration > 0) { 1663 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration); 1664 schedule_timeout_uninterruptible(duration); 1665 pr_alert("%s: Wait complete\n", __func__); 1666 } 1667 } 1668 1669 /* 1670 * Initialize a new grace period. Return false if no grace period required. 1671 */ 1672 static bool rcu_gp_init(void) 1673 { 1674 unsigned long flags; 1675 unsigned long oldmask; 1676 unsigned long mask; 1677 struct rcu_data *rdp; 1678 struct rcu_node *rnp = rcu_get_root(); 1679 1680 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1681 raw_spin_lock_irq_rcu_node(rnp); 1682 if (!READ_ONCE(rcu_state.gp_flags)) { 1683 /* Spurious wakeup, tell caller to go back to sleep. */ 1684 raw_spin_unlock_irq_rcu_node(rnp); 1685 return false; 1686 } 1687 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ 1688 1689 if (WARN_ON_ONCE(rcu_gp_in_progress())) { 1690 /* 1691 * Grace period already in progress, don't start another. 1692 * Not supposed to be able to happen. 1693 */ 1694 raw_spin_unlock_irq_rcu_node(rnp); 1695 return false; 1696 } 1697 1698 /* Advance to a new grace period and initialize state. */ 1699 record_gp_stall_check_time(); 1700 /* Record GP times before starting GP, hence rcu_seq_start(). */ 1701 rcu_seq_start(&rcu_state.gp_seq); 1702 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 1703 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); 1704 raw_spin_unlock_irq_rcu_node(rnp); 1705 1706 /* 1707 * Apply per-leaf buffered online and offline operations to the 1708 * rcu_node tree. Note that this new grace period need not wait 1709 * for subsequent online CPUs, and that quiescent-state forcing 1710 * will handle subsequent offline CPUs. 1711 */ 1712 rcu_state.gp_state = RCU_GP_ONOFF; 1713 rcu_for_each_leaf_node(rnp) { 1714 raw_spin_lock(&rcu_state.ofl_lock); 1715 raw_spin_lock_irq_rcu_node(rnp); 1716 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1717 !rnp->wait_blkd_tasks) { 1718 /* Nothing to do on this leaf rcu_node structure. */ 1719 raw_spin_unlock_irq_rcu_node(rnp); 1720 raw_spin_unlock(&rcu_state.ofl_lock); 1721 continue; 1722 } 1723 1724 /* Record old state, apply changes to ->qsmaskinit field. */ 1725 oldmask = rnp->qsmaskinit; 1726 rnp->qsmaskinit = rnp->qsmaskinitnext; 1727 1728 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1729 if (!oldmask != !rnp->qsmaskinit) { 1730 if (!oldmask) { /* First online CPU for rcu_node. */ 1731 if (!rnp->wait_blkd_tasks) /* Ever offline? 
*/ 1732 rcu_init_new_rnp(rnp); 1733 } else if (rcu_preempt_has_tasks(rnp)) { 1734 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1735 } else { /* Last offline CPU and can propagate. */ 1736 rcu_cleanup_dead_rnp(rnp); 1737 } 1738 } 1739 1740 /* 1741 * If all waited-on tasks from prior grace period are 1742 * done, and if all this rcu_node structure's CPUs are 1743 * still offline, propagate up the rcu_node tree and 1744 * clear ->wait_blkd_tasks. Otherwise, if one of this 1745 * rcu_node structure's CPUs has since come back online, 1746 * simply clear ->wait_blkd_tasks. 1747 */ 1748 if (rnp->wait_blkd_tasks && 1749 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1750 rnp->wait_blkd_tasks = false; 1751 if (!rnp->qsmaskinit) 1752 rcu_cleanup_dead_rnp(rnp); 1753 } 1754 1755 raw_spin_unlock_irq_rcu_node(rnp); 1756 raw_spin_unlock(&rcu_state.ofl_lock); 1757 } 1758 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1759 1760 /* 1761 * Set the quiescent-state-needed bits in all the rcu_node 1762 * structures for all currently online CPUs in breadth-first 1763 * order, starting from the root rcu_node structure, relying on the 1764 * layout of the tree within the rcu_state.node[] array. Note that 1765 * other CPUs will access only the leaves of the hierarchy, thus 1766 * seeing that no grace period is in progress, at least until the 1767 * corresponding leaf node has been initialized. 1768 * 1769 * The grace period cannot complete until the initialization 1770 * process finishes, because this kthread handles both. 1771 */ 1772 rcu_state.gp_state = RCU_GP_INIT; 1773 rcu_for_each_node_breadth_first(rnp) { 1774 rcu_gp_slow(gp_init_delay); 1775 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1776 rdp = this_cpu_ptr(&rcu_data); 1777 rcu_preempt_check_blocked_tasks(rnp); 1778 rnp->qsmask = rnp->qsmaskinit; 1779 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1780 if (rnp == rdp->mynode) 1781 (void)__note_gp_changes(rnp, rdp); 1782 rcu_preempt_boost_start_gp(rnp); 1783 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1784 rnp->level, rnp->grplo, 1785 rnp->grphi, rnp->qsmask); 1786 /* Quiescent states for tasks on any now-offline CPUs. */ 1787 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1788 rnp->rcu_gp_init_mask = mask; 1789 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1790 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1791 else 1792 raw_spin_unlock_irq_rcu_node(rnp); 1793 cond_resched_tasks_rcu_qs(); 1794 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1795 } 1796 1797 return true; 1798 } 1799 1800 /* 1801 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1802 * time. 1803 */ 1804 static bool rcu_gp_fqs_check_wake(int *gfp) 1805 { 1806 struct rcu_node *rnp = rcu_get_root(); 1807 1808 // If under overload conditions, force an immediate FQS scan. 1809 if (*gfp & RCU_GP_FLAG_OVLD) 1810 return true; 1811 1812 // Someone like call_rcu() requested a force-quiescent-state scan. 1813 *gfp = READ_ONCE(rcu_state.gp_flags); 1814 if (*gfp & RCU_GP_FLAG_FQS) 1815 return true; 1816 1817 // The current grace period has completed. 1818 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1819 return true; 1820 1821 return false; 1822 } 1823 1824 /* 1825 * Do one round of quiescent-state forcing. 1826 */ 1827 static void rcu_gp_fqs(bool first_time) 1828 { 1829 struct rcu_node *rnp = rcu_get_root(); 1830 1831 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1832 rcu_state.n_force_qs++; 1833 if (first_time) { 1834 /* Collect dyntick-idle snapshots. 
*/ 1835 force_qs_rnp(dyntick_save_progress_counter); 1836 } else { 1837 /* Handle dyntick-idle and offline CPUs. */ 1838 force_qs_rnp(rcu_implicit_dynticks_qs); 1839 } 1840 /* Clear flag to prevent immediate re-entry. */ 1841 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 1842 raw_spin_lock_irq_rcu_node(rnp); 1843 WRITE_ONCE(rcu_state.gp_flags, 1844 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); 1845 raw_spin_unlock_irq_rcu_node(rnp); 1846 } 1847 } 1848 1849 /* 1850 * Loop doing repeated quiescent-state forcing until the grace period ends. 1851 */ 1852 static void rcu_gp_fqs_loop(void) 1853 { 1854 bool first_gp_fqs; 1855 int gf = 0; 1856 unsigned long j; 1857 int ret; 1858 struct rcu_node *rnp = rcu_get_root(); 1859 1860 first_gp_fqs = true; 1861 j = READ_ONCE(jiffies_till_first_fqs); 1862 if (rcu_state.cbovld) 1863 gf = RCU_GP_FLAG_OVLD; 1864 ret = 0; 1865 for (;;) { 1866 if (!ret) { 1867 rcu_state.jiffies_force_qs = jiffies + j; 1868 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1869 jiffies + (j ? 3 * j : 2)); 1870 } 1871 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1872 TPS("fqswait")); 1873 rcu_state.gp_state = RCU_GP_WAIT_FQS; 1874 ret = swait_event_idle_timeout_exclusive( 1875 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); 1876 rcu_gp_torture_wait(); 1877 rcu_state.gp_state = RCU_GP_DOING_FQS; 1878 /* Locking provides needed memory barriers. */ 1879 /* If grace period done, leave loop. */ 1880 if (!READ_ONCE(rnp->qsmask) && 1881 !rcu_preempt_blocked_readers_cgp(rnp)) 1882 break; 1883 /* If time for quiescent-state forcing, do it. */ 1884 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || 1885 (gf & RCU_GP_FLAG_FQS)) { 1886 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1887 TPS("fqsstart")); 1888 rcu_gp_fqs(first_gp_fqs); 1889 gf = 0; 1890 if (first_gp_fqs) { 1891 first_gp_fqs = false; 1892 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; 1893 } 1894 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1895 TPS("fqsend")); 1896 cond_resched_tasks_rcu_qs(); 1897 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1898 ret = 0; /* Force full wait till next FQS. */ 1899 j = READ_ONCE(jiffies_till_next_fqs); 1900 } else { 1901 /* Deal with stray signal. */ 1902 cond_resched_tasks_rcu_qs(); 1903 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1904 WARN_ON(signal_pending(current)); 1905 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1906 TPS("fqswaitsig")); 1907 ret = 1; /* Keep old FQS timing. */ 1908 j = jiffies; 1909 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 1910 j = 1; 1911 else 1912 j = rcu_state.jiffies_force_qs - j; 1913 gf = 0; 1914 } 1915 } 1916 } 1917 1918 /* 1919 * Clean up after the old grace period. 1920 */ 1921 static void rcu_gp_cleanup(void) 1922 { 1923 int cpu; 1924 bool needgp = false; 1925 unsigned long gp_duration; 1926 unsigned long new_gp_seq; 1927 bool offloaded; 1928 struct rcu_data *rdp; 1929 struct rcu_node *rnp = rcu_get_root(); 1930 struct swait_queue_head *sq; 1931 1932 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1933 raw_spin_lock_irq_rcu_node(rnp); 1934 rcu_state.gp_end = jiffies; 1935 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 1936 if (gp_duration > rcu_state.gp_max) 1937 rcu_state.gp_max = gp_duration; 1938 1939 /* 1940 * We know the grace period is complete, but to everyone else 1941 * it appears to still be ongoing. But it is also the case 1942 * that to everyone else it looks like there is nothing that 1943 * they can do to advance the grace period. 
It is therefore 1944 * safe for us to drop the lock in order to mark the grace 1945 * period as completed in all of the rcu_node structures. 1946 */ 1947 raw_spin_unlock_irq_rcu_node(rnp); 1948 1949 /* 1950 * Propagate new ->gp_seq value to rcu_node structures so that 1951 * other CPUs don't have to wait until the start of the next grace 1952 * period to process their callbacks. This also avoids some nasty 1953 * RCU grace-period initialization races by forcing the end of 1954 * the current grace period to be completely recorded in all of 1955 * the rcu_node structures before the beginning of the next grace 1956 * period is recorded in any of the rcu_node structures. 1957 */ 1958 new_gp_seq = rcu_state.gp_seq; 1959 rcu_seq_end(&new_gp_seq); 1960 rcu_for_each_node_breadth_first(rnp) { 1961 raw_spin_lock_irq_rcu_node(rnp); 1962 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 1963 dump_blkd_tasks(rnp, 10); 1964 WARN_ON_ONCE(rnp->qsmask); 1965 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 1966 rdp = this_cpu_ptr(&rcu_data); 1967 if (rnp == rdp->mynode) 1968 needgp = __note_gp_changes(rnp, rdp) || needgp; 1969 /* smp_mb() provided by prior unlock-lock pair. */ 1970 needgp = rcu_future_gp_cleanup(rnp) || needgp; 1971 // Reset overload indication for CPUs no longer overloaded 1972 if (rcu_is_leaf_node(rnp)) 1973 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { 1974 rdp = per_cpu_ptr(&rcu_data, cpu); 1975 check_cb_ovld_locked(rdp, rnp); 1976 } 1977 sq = rcu_nocb_gp_get(rnp); 1978 raw_spin_unlock_irq_rcu_node(rnp); 1979 rcu_nocb_gp_cleanup(sq); 1980 cond_resched_tasks_rcu_qs(); 1981 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1982 rcu_gp_slow(gp_cleanup_delay); 1983 } 1984 rnp = rcu_get_root(); 1985 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 1986 1987 /* Declare grace period done, trace first to use old GP number. */ 1988 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 1989 rcu_seq_end(&rcu_state.gp_seq); 1990 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 1991 rcu_state.gp_state = RCU_GP_IDLE; 1992 /* Check for GP requests since above loop. */ 1993 rdp = this_cpu_ptr(&rcu_data); 1994 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 1995 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 1996 TPS("CleanupMore")); 1997 needgp = true; 1998 } 1999 /* Advance CBs to reduce false positives below. */ 2000 offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2001 rcu_segcblist_is_offloaded(&rdp->cblist); 2002 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { 2003 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 2004 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 2005 trace_rcu_grace_period(rcu_state.name, 2006 rcu_state.gp_seq, 2007 TPS("newreq")); 2008 } else { 2009 WRITE_ONCE(rcu_state.gp_flags, 2010 rcu_state.gp_flags & RCU_GP_FLAG_INIT); 2011 } 2012 raw_spin_unlock_irq_rcu_node(rnp); 2013 } 2014 2015 /* 2016 * Body of kthread that handles grace periods. 2017 */ 2018 static int __noreturn rcu_gp_kthread(void *unused) 2019 { 2020 rcu_bind_gp_kthread(); 2021 for (;;) { 2022 2023 /* Handle grace-period start. */ 2024 for (;;) { 2025 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2026 TPS("reqwait")); 2027 rcu_state.gp_state = RCU_GP_WAIT_GPS; 2028 swait_event_idle_exclusive(rcu_state.gp_wq, 2029 READ_ONCE(rcu_state.gp_flags) & 2030 RCU_GP_FLAG_INIT); 2031 rcu_gp_torture_wait(); 2032 rcu_state.gp_state = RCU_GP_DONE_GPS; 2033 /* Locking provides needed memory barrier. 
*/ 2034 if (rcu_gp_init()) 2035 break; 2036 cond_resched_tasks_rcu_qs(); 2037 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2038 WARN_ON(signal_pending(current)); 2039 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2040 TPS("reqwaitsig")); 2041 } 2042 2043 /* Handle quiescent-state forcing. */ 2044 rcu_gp_fqs_loop(); 2045 2046 /* Handle grace-period end. */ 2047 rcu_state.gp_state = RCU_GP_CLEANUP; 2048 rcu_gp_cleanup(); 2049 rcu_state.gp_state = RCU_GP_CLEANED; 2050 } 2051 } 2052 2053 /* 2054 * Report a full set of quiescent states to the rcu_state data structure. 2055 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 2056 * another grace period is required. Whether we wake the grace-period 2057 * kthread or it awakens itself for the next round of quiescent-state 2058 * forcing, that kthread will clean up after the just-completed grace 2059 * period. Note that the caller must hold rnp->lock, which is released 2060 * before return. 2061 */ 2062 static void rcu_report_qs_rsp(unsigned long flags) 2063 __releases(rcu_get_root()->lock) 2064 { 2065 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 2066 WARN_ON_ONCE(!rcu_gp_in_progress()); 2067 WRITE_ONCE(rcu_state.gp_flags, 2068 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2069 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 2070 rcu_gp_kthread_wake(); 2071 } 2072 2073 /* 2074 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2075 * Allows quiescent states for a group of CPUs to be reported at one go 2076 * to the specified rcu_node structure, though all the CPUs in the group 2077 * must be represented by the same rcu_node structure (which need not be a 2078 * leaf rcu_node structure, though it often will be). The gps parameter 2079 * is the grace-period snapshot, which means that the quiescent states 2080 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 2081 * must be held upon entry, and it is released before return. 2082 * 2083 * As a special case, if mask is zero, the bit-already-cleared check is 2084 * disabled. This allows propagating quiescent state due to resumed tasks 2085 * during grace-period initialization. 2086 */ 2087 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 2088 unsigned long gps, unsigned long flags) 2089 __releases(rnp->lock) 2090 { 2091 unsigned long oldmask = 0; 2092 struct rcu_node *rnp_c; 2093 2094 raw_lockdep_assert_held_rcu_node(rnp); 2095 2096 /* Walk up the rcu_node hierarchy. */ 2097 for (;;) { 2098 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 2099 2100 /* 2101 * Our bit has already been cleared, or the 2102 * relevant grace period is already over, so done. 2103 */ 2104 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2105 return; 2106 } 2107 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2108 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 2109 rcu_preempt_blocked_readers_cgp(rnp)); 2110 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 2111 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 2112 mask, rnp->qsmask, rnp->level, 2113 rnp->grplo, rnp->grphi, 2114 !!rnp->gp_tasks); 2115 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2116 2117 /* Other bits still set at this level, so done. */ 2118 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2119 return; 2120 } 2121 rnp->completedqs = rnp->gp_seq; 2122 mask = rnp->grpmask; 2123 if (rnp->parent == NULL) { 2124 2125 /* No more levels. Exit loop holding root lock. 
*/ 2126 2127 break; 2128 } 2129 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2130 rnp_c = rnp; 2131 rnp = rnp->parent; 2132 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2133 oldmask = READ_ONCE(rnp_c->qsmask); 2134 } 2135 2136 /* 2137 * Get here if we are the last CPU to pass through a quiescent 2138 * state for this grace period. Invoke rcu_report_qs_rsp() 2139 * to clean up and start the next grace period if one is needed. 2140 */ 2141 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 2142 } 2143 2144 /* 2145 * Record a quiescent state for all tasks that were previously queued 2146 * on the specified rcu_node structure and that were blocking the current 2147 * RCU grace period. The caller must hold the corresponding rnp->lock with 2148 * irqs disabled, and this lock is released upon return, but irqs remain 2149 * disabled. 2150 */ 2151 static void __maybe_unused 2152 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 2153 __releases(rnp->lock) 2154 { 2155 unsigned long gps; 2156 unsigned long mask; 2157 struct rcu_node *rnp_p; 2158 2159 raw_lockdep_assert_held_rcu_node(rnp); 2160 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 2161 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 2162 rnp->qsmask != 0) { 2163 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2164 return; /* Still need more quiescent states! */ 2165 } 2166 2167 rnp->completedqs = rnp->gp_seq; 2168 rnp_p = rnp->parent; 2169 if (rnp_p == NULL) { 2170 /* 2171 * Only one rcu_node structure in the tree, so don't 2172 * try to report up to its nonexistent parent! 2173 */ 2174 rcu_report_qs_rsp(flags); 2175 return; 2176 } 2177 2178 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 2179 gps = rnp->gp_seq; 2180 mask = rnp->grpmask; 2181 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2182 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2183 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 2184 } 2185 2186 /* 2187 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2188 * structure. This must be called from the specified CPU. 2189 */ 2190 static void 2191 rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) 2192 { 2193 unsigned long flags; 2194 unsigned long mask; 2195 bool needwake = false; 2196 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2197 rcu_segcblist_is_offloaded(&rdp->cblist); 2198 struct rcu_node *rnp; 2199 2200 rnp = rdp->mynode; 2201 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2202 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2203 rdp->gpwrap) { 2204 2205 /* 2206 * The grace period in which this quiescent state was 2207 * recorded has ended, so don't report it upwards. 2208 * We will instead need a new quiescent state that lies 2209 * within the current grace period. 2210 */ 2211 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2212 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2213 return; 2214 } 2215 mask = rdp->grpmask; 2216 if (rdp->cpu == smp_processor_id()) 2217 rdp->core_needs_qs = false; 2218 if ((rnp->qsmask & mask) == 0) { 2219 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2220 } else { 2221 /* 2222 * This GP can't end until cpu checks in, so all of our 2223 * callbacks can be processed during the next GP. 
2224 */ 2225 if (!offloaded) 2226 needwake = rcu_accelerate_cbs(rnp, rdp); 2227 2228 rcu_disable_urgency_upon_qs(rdp); 2229 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2230 /* ^^^ Released rnp->lock */ 2231 if (needwake) 2232 rcu_gp_kthread_wake(); 2233 } 2234 } 2235 2236 /* 2237 * Check to see if there is a new grace period of which this CPU 2238 * is not yet aware, and if so, set up local rcu_data state for it. 2239 * Otherwise, see if this CPU has just passed through its first 2240 * quiescent state for this grace period, and record that fact if so. 2241 */ 2242 static void 2243 rcu_check_quiescent_state(struct rcu_data *rdp) 2244 { 2245 /* Check for grace-period ends and beginnings. */ 2246 note_gp_changes(rdp); 2247 2248 /* 2249 * Does this CPU still need to do its part for current grace period? 2250 * If no, return and let the other CPUs do their part as well. 2251 */ 2252 if (!rdp->core_needs_qs) 2253 return; 2254 2255 /* 2256 * Was there a quiescent state since the beginning of the grace 2257 * period? If no, then exit and wait for the next call. 2258 */ 2259 if (rdp->cpu_no_qs.b.norm) 2260 return; 2261 2262 /* 2263 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2264 * judge of that). 2265 */ 2266 rcu_report_qs_rdp(rdp->cpu, rdp); 2267 } 2268 2269 /* 2270 * Near the end of the offline process. Trace the fact that this CPU 2271 * is going offline. 2272 */ 2273 int rcutree_dying_cpu(unsigned int cpu) 2274 { 2275 bool blkd; 2276 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 2277 struct rcu_node *rnp = rdp->mynode; 2278 2279 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2280 return 0; 2281 2282 blkd = !!(rnp->qsmask & rdp->grpmask); 2283 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 2284 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 2285 return 0; 2286 } 2287 2288 /* 2289 * All CPUs for the specified rcu_node structure have gone offline, 2290 * and all tasks that were preempted within an RCU read-side critical 2291 * section while running on one of those CPUs have since exited their RCU 2292 * read-side critical section. Some other CPU is reporting this fact with 2293 * the specified rcu_node structure's ->lock held and interrupts disabled. 2294 * This function therefore goes up the tree of rcu_node structures, 2295 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2296 * the leaf rcu_node structure's ->qsmaskinit field has already been 2297 * updated. 2298 * 2299 * This function does check that the specified rcu_node structure has 2300 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2301 * prematurely. That said, invoking it after the fact will cost you 2302 * a needless lock acquisition. So once it has done its work, don't 2303 * invoke it again. 2304 */ 2305 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2306 { 2307 long mask; 2308 struct rcu_node *rnp = rnp_leaf; 2309 2310 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2311 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2312 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 2313 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 2314 return; 2315 for (;;) { 2316 mask = rnp->grpmask; 2317 rnp = rnp->parent; 2318 if (!rnp) 2319 break; 2320 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2321 rnp->qsmaskinit &= ~mask; 2322 /* Between grace periods, so better already be zero! */ 2323 WARN_ON_ONCE(rnp->qsmask); 2324 if (rnp->qsmaskinit) { 2325 raw_spin_unlock_rcu_node(rnp); 2326 /* irqs remain disabled. 
*/
2327 return;
2328 }
2329 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2330 }
2331 }
2332
2333 /*
2334 * The CPU has been completely removed, and some other CPU is reporting
2335 * this fact from process context. Do the remainder of the cleanup.
2336 * There can only be one CPU hotplug operation at a time, so no need for
2337 * explicit locking.
2338 */
2339 int rcutree_dead_cpu(unsigned int cpu)
2340 {
2341 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2342 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2343
2344 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2345 return 0;
2346
2347 /* Adjust any no-longer-needed kthreads. */
2348 rcu_boost_kthread_setaffinity(rnp, -1);
2349 /* Do any needed no-CB deferred wakeups from this CPU. */
2350 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2351
2352 // Stop-machine done, so allow nohz_full to disable tick.
2353 tick_dep_clear(TICK_DEP_BIT_RCU);
2354 return 0;
2355 }
2356
2357 /*
2358 * Invoke any RCU callbacks that have made it to the end of their grace
2359 * period. Throttle as specified by rdp->blimit.
2360 */
2361 static void rcu_do_batch(struct rcu_data *rdp)
2362 {
2363 unsigned long flags;
2364 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2365 rcu_segcblist_is_offloaded(&rdp->cblist);
2366 struct rcu_head *rhp;
2367 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2368 long bl, count;
2369 long pending, tlimit = 0;
2370
2371 /* If no callbacks are ready, just return. */
2372 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2373 trace_rcu_batch_start(rcu_state.name,
2374 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2375 trace_rcu_batch_end(rcu_state.name, 0,
2376 !rcu_segcblist_empty(&rdp->cblist),
2377 need_resched(), is_idle_task(current),
2378 rcu_is_callbacks_kthread());
2379 return;
2380 }
2381
2382 /*
2383 * Extract the list of ready callbacks, disabling interrupts to prevent
2384 * races with call_rcu() from interrupt handlers. Leave the
2385 * callback counts, as rcu_barrier() needs to be conservative.
2386 */
2387 local_irq_save(flags);
2388 rcu_nocb_lock(rdp);
2389 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2390 pending = rcu_segcblist_n_cbs(&rdp->cblist);
2391 bl = max(rdp->blimit, pending >> rcu_divisor);
2392 if (unlikely(bl > 100))
2393 tlimit = local_clock() + rcu_resched_ns;
2394 trace_rcu_batch_start(rcu_state.name,
2395 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2396 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2397 if (offloaded)
2398 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2399 rcu_nocb_unlock_irqrestore(rdp, flags);
2400
2401 /* Invoke callbacks. */
2402 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2403 rhp = rcu_cblist_dequeue(&rcl);
2404 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2405 rcu_callback_t f;
2406
2407 debug_rcu_head_unqueue(rhp);
2408
2409 rcu_lock_acquire(&rcu_callback_map);
2410 trace_rcu_invoke_callback(rcu_state.name, rhp);
2411
2412 f = rhp->func;
2413 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2414 f(rhp);
2415
2416 rcu_lock_release(&rcu_callback_map);
2417
2418 /*
2419 * Stop only if limit reached and CPU has something to do.
2420 * Note: The rcl structure counts down from zero.
2421 */
2422 if (-rcl.len >= bl && !offloaded &&
2423 (need_resched() ||
2424 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2425 break;
2426 if (unlikely(tlimit)) {
2427 /* only call local_clock() every 32 callbacks */
2428 if (likely((-rcl.len & 31) || local_clock() < tlimit))
2429 continue;
2430 /* Exceeded the time limit, so leave. */
2431 break;
2432 }
2433 if (offloaded) {
2434 WARN_ON_ONCE(in_serving_softirq());
2435 local_bh_enable();
2436 lockdep_assert_irqs_enabled();
2437 cond_resched_tasks_rcu_qs();
2438 lockdep_assert_irqs_enabled();
2439 local_bh_disable();
2440 }
2441 }
2442
2443 local_irq_save(flags);
2444 rcu_nocb_lock(rdp);
2445 count = -rcl.len;
2446 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2447 is_idle_task(current), rcu_is_callbacks_kthread());
2448
2449 /* Update counts and requeue any remaining callbacks. */
2450 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2451 smp_mb(); /* List handling before counting for rcu_barrier(). */
2452 rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2453
2454 /* Reinstate batch limit if we have worked down the excess. */
2455 count = rcu_segcblist_n_cbs(&rdp->cblist);
2456 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2457 rdp->blimit = blimit;
2458
2459 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2460 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2461 rdp->qlen_last_fqs_check = 0;
2462 rdp->n_force_qs_snap = rcu_state.n_force_qs;
2463 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2464 rdp->qlen_last_fqs_check = count;
2465
2466 /*
2467 * The following usually indicates a double call_rcu(). To track
2468 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2469 */
2470 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
2471 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2472 count != 0 && rcu_segcblist_empty(&rdp->cblist));
2473
2474 rcu_nocb_unlock_irqrestore(rdp, flags);
2475
2476 /* Re-invoke RCU core processing if there are callbacks remaining. */
2477 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2478 invoke_rcu_core();
2479 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2480 }
2481
2482 /*
2483 * This function is invoked from each scheduling-clock interrupt,
2484 * and checks to see if this CPU is in a non-context-switch quiescent
2485 * state, for example, user mode or idle loop. It also schedules RCU
2486 * core processing. If the current grace period has gone on too long,
2487 * it will ask the scheduler to manufacture a context switch for the sole
2488 * purpose of providing the needed quiescent state.
2489 */
2490 void rcu_sched_clock_irq(int user)
2491 {
2492 trace_rcu_utilization(TPS("Start scheduler-tick"));
2493 raw_cpu_inc(rcu_data.ticks_this_gp);
2494 /* The load-acquire pairs with the store-release setting to true. */
2495 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2496 /* Idle and userspace execution already are quiescent states. */
2497 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2498 set_tsk_need_resched(current);
2499 set_preempt_need_resched();
2500 }
2501 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2502 }
2503 rcu_flavor_sched_clock_irq(user);
2504 if (rcu_pending(user))
2505 invoke_rcu_core();
2506
2507 trace_rcu_utilization(TPS("End scheduler-tick"));
2508 }
2509
2510 /*
2511 * Scan the leaf rcu_node structures. For each structure on which all
2512 * CPUs have reported a quiescent state and on which there are tasks
2513 * blocking the current grace period, initiate RCU priority boosting.
2514 * Otherwise, invoke the specified function to check dyntick state for
2515 * each CPU that has not yet reported a quiescent state.
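 *
 * For example, rcu_gp_fqs() above passes either:
 *
 *	force_qs_rnp(dyntick_save_progress_counter);	// Collect dyntick-idle snapshots.
 * or:
 *	force_qs_rnp(rcu_implicit_dynticks_qs);		// Handle dyntick-idle and offline CPUs.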
2516 */ 2517 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2518 { 2519 int cpu; 2520 unsigned long flags; 2521 unsigned long mask; 2522 struct rcu_data *rdp; 2523 struct rcu_node *rnp; 2524 2525 rcu_state.cbovld = rcu_state.cbovldnext; 2526 rcu_state.cbovldnext = false; 2527 rcu_for_each_leaf_node(rnp) { 2528 cond_resched_tasks_rcu_qs(); 2529 mask = 0; 2530 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2531 rcu_state.cbovldnext |= !!rnp->cbovldmask; 2532 if (rnp->qsmask == 0) { 2533 if (!IS_ENABLED(CONFIG_PREEMPT_RCU) || 2534 rcu_preempt_blocked_readers_cgp(rnp)) { 2535 /* 2536 * No point in scanning bits because they 2537 * are all zero. But we might need to 2538 * priority-boost blocked readers. 2539 */ 2540 rcu_initiate_boost(rnp, flags); 2541 /* rcu_initiate_boost() releases rnp->lock */ 2542 continue; 2543 } 2544 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2545 continue; 2546 } 2547 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { 2548 rdp = per_cpu_ptr(&rcu_data, cpu); 2549 if (f(rdp)) { 2550 mask |= rdp->grpmask; 2551 rcu_disable_urgency_upon_qs(rdp); 2552 } 2553 } 2554 if (mask != 0) { 2555 /* Idle/offline CPUs, report (releases rnp->lock). */ 2556 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2557 } else { 2558 /* Nothing to do here, so just drop the lock. */ 2559 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2560 } 2561 } 2562 } 2563 2564 /* 2565 * Force quiescent states on reluctant CPUs, and also detect which 2566 * CPUs are in dyntick-idle mode. 2567 */ 2568 void rcu_force_quiescent_state(void) 2569 { 2570 unsigned long flags; 2571 bool ret; 2572 struct rcu_node *rnp; 2573 struct rcu_node *rnp_old = NULL; 2574 2575 /* Funnel through hierarchy to reduce memory contention. */ 2576 rnp = __this_cpu_read(rcu_data.mynode); 2577 for (; rnp != NULL; rnp = rnp->parent) { 2578 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2579 !raw_spin_trylock(&rnp->fqslock); 2580 if (rnp_old != NULL) 2581 raw_spin_unlock(&rnp_old->fqslock); 2582 if (ret) 2583 return; 2584 rnp_old = rnp; 2585 } 2586 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2587 2588 /* Reached the root of the rcu_node tree, acquire lock. */ 2589 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2590 raw_spin_unlock(&rnp_old->fqslock); 2591 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2592 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2593 return; /* Someone beat us to it. */ 2594 } 2595 WRITE_ONCE(rcu_state.gp_flags, 2596 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2597 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2598 rcu_gp_kthread_wake(); 2599 } 2600 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2601 2602 /* Perform RCU core processing work for the current CPU. */ 2603 static __latent_entropy void rcu_core(void) 2604 { 2605 unsigned long flags; 2606 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2607 struct rcu_node *rnp = rdp->mynode; 2608 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2609 rcu_segcblist_is_offloaded(&rdp->cblist); 2610 2611 if (cpu_is_offline(smp_processor_id())) 2612 return; 2613 trace_rcu_utilization(TPS("Start RCU core")); 2614 WARN_ON_ONCE(!rdp->beenonline); 2615 2616 /* Report any deferred quiescent states if preemption enabled. */ 2617 if (!(preempt_count() & PREEMPT_MASK)) { 2618 rcu_preempt_deferred_qs(current); 2619 } else if (rcu_preempt_need_deferred_qs(current)) { 2620 set_tsk_need_resched(current); 2621 set_preempt_need_resched(); 2622 } 2623 2624 /* Update RCU state based on any recent quiescent states. 
*/ 2625 rcu_check_quiescent_state(rdp); 2626 2627 /* No grace period and unregistered callbacks? */ 2628 if (!rcu_gp_in_progress() && 2629 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { 2630 local_irq_save(flags); 2631 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2632 rcu_accelerate_cbs_unlocked(rnp, rdp); 2633 local_irq_restore(flags); 2634 } 2635 2636 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2637 2638 /* If there are callbacks ready, invoke them. */ 2639 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && 2640 likely(READ_ONCE(rcu_scheduler_fully_active))) 2641 rcu_do_batch(rdp); 2642 2643 /* Do any needed deferred wakeups of rcuo kthreads. */ 2644 do_nocb_deferred_wakeup(rdp); 2645 trace_rcu_utilization(TPS("End RCU core")); 2646 } 2647 2648 static void rcu_core_si(struct softirq_action *h) 2649 { 2650 rcu_core(); 2651 } 2652 2653 static void rcu_wake_cond(struct task_struct *t, int status) 2654 { 2655 /* 2656 * If the thread is yielding, only wake it when this 2657 * is invoked from idle 2658 */ 2659 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2660 wake_up_process(t); 2661 } 2662 2663 static void invoke_rcu_core_kthread(void) 2664 { 2665 struct task_struct *t; 2666 unsigned long flags; 2667 2668 local_irq_save(flags); 2669 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2670 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2671 if (t != NULL && t != current) 2672 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2673 local_irq_restore(flags); 2674 } 2675 2676 /* 2677 * Wake up this CPU's rcuc kthread to do RCU core processing. 2678 */ 2679 static void invoke_rcu_core(void) 2680 { 2681 if (!cpu_online(smp_processor_id())) 2682 return; 2683 if (use_softirq) 2684 raise_softirq(RCU_SOFTIRQ); 2685 else 2686 invoke_rcu_core_kthread(); 2687 } 2688 2689 static void rcu_cpu_kthread_park(unsigned int cpu) 2690 { 2691 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2692 } 2693 2694 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2695 { 2696 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2697 } 2698 2699 /* 2700 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2701 * the RCU softirq used in configurations of RCU that do not support RCU 2702 * priority boosting. 
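 *
 * As invoke_rcu_core() above shows, RCU core processing is routed to
 * these kthreads instead of RCU_SOFTIRQ only when use_softirq is
 * cleared (for example by booting with "rcutree.use_softirq=0").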
2703 */ 2704 static void rcu_cpu_kthread(unsigned int cpu) 2705 { 2706 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2707 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2708 int spincnt; 2709 2710 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2711 for (spincnt = 0; spincnt < 10; spincnt++) { 2712 local_bh_disable(); 2713 *statusp = RCU_KTHREAD_RUNNING; 2714 local_irq_disable(); 2715 work = *workp; 2716 *workp = 0; 2717 local_irq_enable(); 2718 if (work) 2719 rcu_core(); 2720 local_bh_enable(); 2721 if (*workp == 0) { 2722 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2723 *statusp = RCU_KTHREAD_WAITING; 2724 return; 2725 } 2726 } 2727 *statusp = RCU_KTHREAD_YIELDING; 2728 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2729 schedule_timeout_interruptible(2); 2730 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2731 *statusp = RCU_KTHREAD_WAITING; 2732 } 2733 2734 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2735 .store = &rcu_data.rcu_cpu_kthread_task, 2736 .thread_should_run = rcu_cpu_kthread_should_run, 2737 .thread_fn = rcu_cpu_kthread, 2738 .thread_comm = "rcuc/%u", 2739 .setup = rcu_cpu_kthread_setup, 2740 .park = rcu_cpu_kthread_park, 2741 }; 2742 2743 /* 2744 * Spawn per-CPU RCU core processing kthreads. 2745 */ 2746 static int __init rcu_spawn_core_kthreads(void) 2747 { 2748 int cpu; 2749 2750 for_each_possible_cpu(cpu) 2751 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2752 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq) 2753 return 0; 2754 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2755 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2756 return 0; 2757 } 2758 early_initcall(rcu_spawn_core_kthreads); 2759 2760 /* 2761 * Handle any core-RCU processing required by a call_rcu() invocation. 2762 */ 2763 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2764 unsigned long flags) 2765 { 2766 /* 2767 * If called from an extended quiescent state, invoke the RCU 2768 * core in order to force a re-evaluation of RCU's idleness. 2769 */ 2770 if (!rcu_is_watching()) 2771 invoke_rcu_core(); 2772 2773 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2774 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2775 return; 2776 2777 /* 2778 * Force the grace period if too many callbacks or too long waiting. 2779 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 2780 * if some other CPU has recently done so. Also, don't bother 2781 * invoking rcu_force_quiescent_state() if the newly enqueued callback 2782 * is the only one waiting for a grace period to complete. 2783 */ 2784 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2785 rdp->qlen_last_fqs_check + qhimark)) { 2786 2787 /* Are we ignoring a completed grace period? */ 2788 note_gp_changes(rdp); 2789 2790 /* Start a new grace period if one not already started. */ 2791 if (!rcu_gp_in_progress()) { 2792 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 2793 } else { 2794 /* Give the grace period a kick. */ 2795 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 2796 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && 2797 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2798 rcu_force_quiescent_state(); 2799 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2800 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2801 } 2802 } 2803 } 2804 2805 /* 2806 * RCU callback function to leak a callback. 
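 * __call_rcu() below substitutes this for the requested callback when
 * debug_rcu_head_queue() detects a probable double call_rcu(), for
 * example (buggy-usage sketch, names illustrative):
 *
 *	call_rcu(&p->rh, first_cb);
 *	call_rcu(&p->rh, second_cb);	// Same rcu_head queued twice!
 *
 * so that the already-queued callback becomes a no-op and the memory it
 * would have freed is leaked rather than freed twice.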
2807 */ 2808 static void rcu_leak_callback(struct rcu_head *rhp) 2809 { 2810 } 2811 2812 /* 2813 * Check and if necessary update the leaf rcu_node structure's 2814 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2815 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 2816 * structure's ->lock. 2817 */ 2818 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 2819 { 2820 raw_lockdep_assert_held_rcu_node(rnp); 2821 if (qovld_calc <= 0) 2822 return; // Early boot and wildcard value set. 2823 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 2824 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 2825 else 2826 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 2827 } 2828 2829 /* 2830 * Check and if necessary update the leaf rcu_node structure's 2831 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2832 * number of queued RCU callbacks. No locks need be held, but the 2833 * caller must have disabled interrupts. 2834 * 2835 * Note that this function ignores the possibility that there are a lot 2836 * of callbacks all of which have already seen the end of their respective 2837 * grace periods. This omission is due to the need for no-CBs CPUs to 2838 * be holding ->nocb_lock to do this check, which is too heavy for a 2839 * common-case operation. 2840 */ 2841 static void check_cb_ovld(struct rcu_data *rdp) 2842 { 2843 struct rcu_node *const rnp = rdp->mynode; 2844 2845 if (qovld_calc <= 0 || 2846 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 2847 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 2848 return; // Early boot wildcard value or already set correctly. 2849 raw_spin_lock_rcu_node(rnp); 2850 check_cb_ovld_locked(rdp, rnp); 2851 raw_spin_unlock_rcu_node(rnp); 2852 } 2853 2854 /* Helper function for call_rcu() and friends. */ 2855 static void 2856 __call_rcu(struct rcu_head *head, rcu_callback_t func) 2857 { 2858 unsigned long flags; 2859 struct rcu_data *rdp; 2860 bool was_alldone; 2861 2862 /* Misaligned rcu_head! */ 2863 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 2864 2865 if (debug_rcu_head_queue(head)) { 2866 /* 2867 * Probable double call_rcu(), so leak the callback. 2868 * Use rcu:rcu_callback trace event to find the previous 2869 * time callback was passed to __call_rcu(). 2870 */ 2871 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", 2872 head, head->func); 2873 WRITE_ONCE(head->func, rcu_leak_callback); 2874 return; 2875 } 2876 head->func = func; 2877 head->next = NULL; 2878 local_irq_save(flags); 2879 rdp = this_cpu_ptr(&rcu_data); 2880 2881 /* Add the callback to our list. */ 2882 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 2883 // This can trigger due to call_rcu() from offline CPU: 2884 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 2885 WARN_ON_ONCE(!rcu_is_watching()); 2886 // Very early boot, before rcu_init(). Initialize if needed 2887 // and then drop through to queue the callback. 2888 if (rcu_segcblist_empty(&rdp->cblist)) 2889 rcu_segcblist_init(&rdp->cblist); 2890 } 2891 2892 check_cb_ovld(rdp); 2893 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) 2894 return; // Enqueued onto ->nocb_bypass, so just leave. 2895 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. 
2896 rcu_segcblist_enqueue(&rdp->cblist, head); 2897 if (__is_kfree_rcu_offset((unsigned long)func)) 2898 trace_rcu_kfree_callback(rcu_state.name, head, 2899 (unsigned long)func, 2900 rcu_segcblist_n_cbs(&rdp->cblist)); 2901 else 2902 trace_rcu_callback(rcu_state.name, head, 2903 rcu_segcblist_n_cbs(&rdp->cblist)); 2904 2905 /* Go handle any RCU core processing required. */ 2906 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2907 unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { 2908 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ 2909 } else { 2910 __call_rcu_core(rdp, head, flags); 2911 local_irq_restore(flags); 2912 } 2913 } 2914 2915 /** 2916 * call_rcu() - Queue an RCU callback for invocation after a grace period. 2917 * @head: structure to be used for queueing the RCU updates. 2918 * @func: actual callback function to be invoked after the grace period 2919 * 2920 * The callback function will be invoked some time after a full grace 2921 * period elapses, in other words after all pre-existing RCU read-side 2922 * critical sections have completed. However, the callback function 2923 * might well execute concurrently with RCU read-side critical sections 2924 * that started after call_rcu() was invoked. RCU read-side critical 2925 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and 2926 * may be nested. In addition, regions of code across which interrupts, 2927 * preemption, or softirqs have been disabled also serve as RCU read-side 2928 * critical sections. This includes hardware interrupt handlers, softirq 2929 * handlers, and NMI handlers. 2930 * 2931 * Note that all CPUs must agree that the grace period extended beyond 2932 * all pre-existing RCU read-side critical section. On systems with more 2933 * than one CPU, this means that when "func()" is invoked, each CPU is 2934 * guaranteed to have executed a full memory barrier since the end of its 2935 * last RCU read-side critical section whose beginning preceded the call 2936 * to call_rcu(). It also means that each CPU executing an RCU read-side 2937 * critical section that continues beyond the start of "func()" must have 2938 * executed a memory barrier after the call_rcu() but before the beginning 2939 * of that RCU read-side critical section. Note that these guarantees 2940 * include CPUs that are offline, idle, or executing in user mode, as 2941 * well as CPUs that are executing in the kernel. 2942 * 2943 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 2944 * resulting RCU callback function "func()", then both CPU A and CPU B are 2945 * guaranteed to execute a full memory barrier during the time interval 2946 * between the call to call_rcu() and the invocation of "func()" -- even 2947 * if CPU A and CPU B are the same CPU (but again only if the system has 2948 * more than one CPU). 2949 */ 2950 void call_rcu(struct rcu_head *head, rcu_callback_t func) 2951 { 2952 __call_rcu(head, func); 2953 } 2954 EXPORT_SYMBOL_GPL(call_rcu); 2955 2956 2957 /* Maximum number of jiffies to wait before draining a batch. */ 2958 #define KFREE_DRAIN_JIFFIES (HZ / 50) 2959 #define KFREE_N_BATCHES 2 2960 2961 /* 2962 * This macro defines how many entries the "records" array 2963 * will contain. It is based on the fact that the size of 2964 * kfree_rcu_bulk_data structure becomes exactly one page. 
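 *
 * The "- 3" accounts for the three non-array fields of struct
 * kfree_rcu_bulk_data (nr_records, next, and head_free_debug), each of
 * which is pointer-sized. For example, with 4K pages and 64-bit
 * pointers this works out to 4096/8 - 3 = 509 records per block.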
2965 */ 2966 #define KFREE_BULK_MAX_ENTR ((PAGE_SIZE / sizeof(void *)) - 3) 2967 2968 /** 2969 * struct kfree_rcu_bulk_data - single block to store kfree_rcu() pointers 2970 * @nr_records: Number of active pointers in the array 2971 * @records: Array of the kfree_rcu() pointers 2972 * @next: Next bulk object in the block chain 2973 * @head_free_debug: For debug, when CONFIG_DEBUG_OBJECTS_RCU_HEAD is set 2974 */ 2975 struct kfree_rcu_bulk_data { 2976 unsigned long nr_records; 2977 void *records[KFREE_BULK_MAX_ENTR]; 2978 struct kfree_rcu_bulk_data *next; 2979 struct rcu_head *head_free_debug; 2980 }; 2981 2982 /** 2983 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests 2984 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period 2985 * @head_free: List of kfree_rcu() objects waiting for a grace period 2986 * @bhead_free: Bulk-List of kfree_rcu() objects waiting for a grace period 2987 * @krcp: Pointer to @kfree_rcu_cpu structure 2988 */ 2989 2990 struct kfree_rcu_cpu_work { 2991 struct rcu_work rcu_work; 2992 struct rcu_head *head_free; 2993 struct kfree_rcu_bulk_data *bhead_free; 2994 struct kfree_rcu_cpu *krcp; 2995 }; 2996 2997 /** 2998 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period 2999 * @head: List of kfree_rcu() objects not yet waiting for a grace period 3000 * @bhead: Bulk-List of kfree_rcu() objects not yet waiting for a grace period 3001 * @bcached: Keeps at most one object for later reuse when build chain blocks 3002 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period 3003 * @lock: Synchronize access to this structure 3004 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES 3005 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending 3006 * @initialized: The @lock and @rcu_work fields have been initialized 3007 * 3008 * This is a per-CPU structure. The reason that it is not included in 3009 * the rcu_data structure is to permit this code to be extracted from 3010 * the RCU files. Such extraction could allow further optimization of 3011 * the interactions with the slab allocators. 3012 */ 3013 struct kfree_rcu_cpu { 3014 struct rcu_head *head; 3015 struct kfree_rcu_bulk_data *bhead; 3016 struct kfree_rcu_bulk_data *bcached; 3017 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES]; 3018 spinlock_t lock; 3019 struct delayed_work monitor_work; 3020 bool monitor_todo; 3021 bool initialized; 3022 // Number of objects for which GP not started 3023 int count; 3024 }; 3025 3026 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc); 3027 3028 static __always_inline void 3029 debug_rcu_head_unqueue_bulk(struct rcu_head *head) 3030 { 3031 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3032 for (; head; head = head->next) 3033 debug_rcu_head_unqueue(head); 3034 #endif 3035 } 3036 3037 /* 3038 * This function is invoked in workqueue context after a grace period. 3039 * It frees all the objects queued on ->bhead_free or ->head_free. 
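 *
 * For objects on ->head_free (the emergency path), ->func does not hold
 * a function pointer: kfree_rcu() stores the offset of the rcu_head
 * within its enclosing structure there, so the original pointer is
 * recovered by subtraction. Illustrative sketch (struct and variable
 * names hypothetical):
 *
 *	struct foo { int a; struct rcu_head rh; };
 *	kfree_rcu(fp, rh);		// ->func encodes offsetof(struct foo, rh)
 *	...
 *	kfree((void *)head - offset);	// frees the original fp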
3040 */ 3041 static void kfree_rcu_work(struct work_struct *work) 3042 { 3043 unsigned long flags; 3044 struct rcu_head *head, *next; 3045 struct kfree_rcu_bulk_data *bhead, *bnext; 3046 struct kfree_rcu_cpu *krcp; 3047 struct kfree_rcu_cpu_work *krwp; 3048 3049 krwp = container_of(to_rcu_work(work), 3050 struct kfree_rcu_cpu_work, rcu_work); 3051 krcp = krwp->krcp; 3052 spin_lock_irqsave(&krcp->lock, flags); 3053 head = krwp->head_free; 3054 krwp->head_free = NULL; 3055 bhead = krwp->bhead_free; 3056 krwp->bhead_free = NULL; 3057 spin_unlock_irqrestore(&krcp->lock, flags); 3058 3059 /* "bhead" is now private, so traverse locklessly. */ 3060 for (; bhead; bhead = bnext) { 3061 bnext = bhead->next; 3062 3063 debug_rcu_head_unqueue_bulk(bhead->head_free_debug); 3064 3065 rcu_lock_acquire(&rcu_callback_map); 3066 trace_rcu_invoke_kfree_bulk_callback(rcu_state.name, 3067 bhead->nr_records, bhead->records); 3068 3069 kfree_bulk(bhead->nr_records, bhead->records); 3070 rcu_lock_release(&rcu_callback_map); 3071 3072 if (cmpxchg(&krcp->bcached, NULL, bhead)) 3073 free_page((unsigned long) bhead); 3074 3075 cond_resched_tasks_rcu_qs(); 3076 } 3077 3078 /* 3079 * Emergency case only. It can happen under low memory 3080 * condition when an allocation gets failed, so the "bulk" 3081 * path can not be temporary maintained. 3082 */ 3083 for (; head; head = next) { 3084 unsigned long offset = (unsigned long)head->func; 3085 3086 next = head->next; 3087 debug_rcu_head_unqueue(head); 3088 rcu_lock_acquire(&rcu_callback_map); 3089 trace_rcu_invoke_kfree_callback(rcu_state.name, head, offset); 3090 3091 if (!WARN_ON_ONCE(!__is_kfree_rcu_offset(offset))) 3092 kfree((void *)head - offset); 3093 3094 rcu_lock_release(&rcu_callback_map); 3095 cond_resched_tasks_rcu_qs(); 3096 } 3097 } 3098 3099 /* 3100 * Schedule the kfree batch RCU work to run in workqueue context after a GP. 3101 * 3102 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES 3103 * timeout has been reached. 3104 */ 3105 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp) 3106 { 3107 struct kfree_rcu_cpu_work *krwp; 3108 bool queued = false; 3109 int i; 3110 3111 lockdep_assert_held(&krcp->lock); 3112 3113 for (i = 0; i < KFREE_N_BATCHES; i++) { 3114 krwp = &(krcp->krw_arr[i]); 3115 3116 /* 3117 * Try to detach bhead or head and attach it over any 3118 * available corresponding free channel. It can be that 3119 * a previous RCU batch is in progress, it means that 3120 * immediately to queue another one is not possible so 3121 * return false to tell caller to retry. 3122 */ 3123 if ((krcp->bhead && !krwp->bhead_free) || 3124 (krcp->head && !krwp->head_free)) { 3125 /* Channel 1. */ 3126 if (!krwp->bhead_free) { 3127 krwp->bhead_free = krcp->bhead; 3128 krcp->bhead = NULL; 3129 } 3130 3131 /* Channel 2. */ 3132 if (!krwp->head_free) { 3133 krwp->head_free = krcp->head; 3134 krcp->head = NULL; 3135 } 3136 3137 WRITE_ONCE(krcp->count, 0); 3138 3139 /* 3140 * One work is per one batch, so there are two "free channels", 3141 * "bhead_free" and "head_free" the batch can handle. It can be 3142 * that the work is in the pending state when two channels have 3143 * been detached following each other, one by one. 3144 */ 3145 queue_rcu_work(system_wq, &krwp->rcu_work); 3146 queued = true; 3147 } 3148 } 3149 3150 return queued; 3151 } 3152 3153 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp, 3154 unsigned long flags) 3155 { 3156 // Attempt to start a new batch. 
3157 krcp->monitor_todo = false;
3158 if (queue_kfree_rcu_work(krcp)) {
3159 // Success! Our job is done here.
3160 spin_unlock_irqrestore(&krcp->lock, flags);
3161 return;
3162 }
3163
3164 // Previous RCU batch still in progress, try again later.
3165 krcp->monitor_todo = true;
3166 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3167 spin_unlock_irqrestore(&krcp->lock, flags);
3168 }
3169
3170 /*
3171 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3172 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3173 */
3174 static void kfree_rcu_monitor(struct work_struct *work)
3175 {
3176 unsigned long flags;
3177 struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3178 monitor_work.work);
3179
3180 spin_lock_irqsave(&krcp->lock, flags);
3181 if (krcp->monitor_todo)
3182 kfree_rcu_drain_unlock(krcp, flags);
3183 else
3184 spin_unlock_irqrestore(&krcp->lock, flags);
3185 }
3186
3187 static inline bool
3188 kfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp,
3189 struct rcu_head *head, rcu_callback_t func)
3190 {
3191 struct kfree_rcu_bulk_data *bnode;
3192
3193 if (unlikely(!krcp->initialized))
3194 return false;
3195
3196 lockdep_assert_held(&krcp->lock);
3197
3198 /* Check if a new block is required. */
3199 if (!krcp->bhead ||
3200 krcp->bhead->nr_records == KFREE_BULK_MAX_ENTR) {
3201 bnode = xchg(&krcp->bcached, NULL);
3202 if (!bnode) {
3203 WARN_ON_ONCE(sizeof(struct kfree_rcu_bulk_data) > PAGE_SIZE);
3204
3205 bnode = (struct kfree_rcu_bulk_data *)
3206 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3207 }
3208
3209 /* Switch to emergency path. */
3210 if (unlikely(!bnode))
3211 return false;
3212
3213 /* Initialize the new block. */
3214 bnode->nr_records = 0;
3215 bnode->next = krcp->bhead;
3216 bnode->head_free_debug = NULL;
3217
3218 /* Attach it to the head. */
3219 krcp->bhead = bnode;
3220 }
3221
3222 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3223 head->func = func;
3224 head->next = krcp->bhead->head_free_debug;
3225 krcp->bhead->head_free_debug = head;
3226 #endif
3227
3228 /* Finally insert. */
3229 krcp->bhead->records[krcp->bhead->nr_records++] =
3230 (void *) head - (unsigned long) func;
3231
3232 return true;
3233 }
3234
3235 /*
3236 * Queue a request for lazy invocation of kfree_bulk()/kfree() after a grace
3237 * period. Note that two paths are maintained: the main path, which uses
3238 * the kfree_bulk() interface, and an emergency path, which is used only
3239 * when the main path temporarily cannot be maintained due to memory
3240 * pressure.
3241 *
3242 * Each kfree_call_rcu() request is added to a batch. The batch is drained
3243 * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3244 * in workqueue context. Batching requests in this way reduces the number of
3245 * grace periods needed during heavy kfree_rcu() load.
3246 */
3247 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3248 {
3249 unsigned long flags;
3250 struct kfree_rcu_cpu *krcp;
3251
3252 local_irq_save(flags); // For safely calling this_cpu_ptr().
3253 krcp = this_cpu_ptr(&krc);
3254 if (krcp->initialized)
3255 spin_lock(&krcp->lock);
3256
3257 // Queue the object but don't yet schedule the batch.
3258 if (debug_rcu_head_queue(head)) {
3259 // Probable double kfree_rcu(), just leak.
3260 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3261 __func__, head);
3262 goto unlock_return;
3263 }
3264
3265 /*
3266 * Under high memory pressure GFP_NOWAIT can fail; in that
3267 * case the emergency path is used instead.
3268 */
3269 if (unlikely(!kfree_call_rcu_add_ptr_to_bulk(krcp, head, func))) {
3270 head->func = func;
3271 head->next = krcp->head;
3272 krcp->head = head;
3273 }
3274
3275 WRITE_ONCE(krcp->count, krcp->count + 1);
3276
3277 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3278 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3279 !krcp->monitor_todo) {
3280 krcp->monitor_todo = true;
3281 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3282 }
3283
3284 unlock_return:
3285 if (krcp->initialized)
3286 spin_unlock(&krcp->lock);
3287 local_irq_restore(flags);
3288 }
3289 EXPORT_SYMBOL_GPL(kfree_call_rcu);
3290
3291 static unsigned long
3292 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3293 {
3294 int cpu;
3295 unsigned long count = 0;
3296
3297 /* Snapshot count of all CPUs */
3298 for_each_online_cpu(cpu) {
3299 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3300
3301 count += READ_ONCE(krcp->count);
3302 }
3303
3304 return count;
3305 }
3306
3307 static unsigned long
3308 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3309 {
3310 int cpu, freed = 0;
3311 unsigned long flags;
3312
3313 for_each_online_cpu(cpu) {
3314 int count;
3315 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3316
3317 count = krcp->count;
3318 spin_lock_irqsave(&krcp->lock, flags);
3319 if (krcp->monitor_todo)
3320 kfree_rcu_drain_unlock(krcp, flags);
3321 else
3322 spin_unlock_irqrestore(&krcp->lock, flags);
3323
3324 sc->nr_to_scan -= count;
3325 freed += count;
3326
3327 if (sc->nr_to_scan <= 0)
3328 break;
3329 }
3330
3331 return freed;
3332 }
3333
3334 static struct shrinker kfree_rcu_shrinker = {
3335 .count_objects = kfree_rcu_shrink_count,
3336 .scan_objects = kfree_rcu_shrink_scan,
3337 .batch = 0,
3338 .seeks = DEFAULT_SEEKS,
3339 };
3340
3341 void __init kfree_rcu_scheduler_running(void)
3342 {
3343 int cpu;
3344 unsigned long flags;
3345
3346 for_each_online_cpu(cpu) {
3347 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3348
3349 spin_lock_irqsave(&krcp->lock, flags);
3350 if (!krcp->head || krcp->monitor_todo) {
3351 spin_unlock_irqrestore(&krcp->lock, flags);
3352 continue;
3353 }
3354 krcp->monitor_todo = true;
3355 schedule_delayed_work_on(cpu, &krcp->monitor_work,
3356 KFREE_DRAIN_JIFFIES);
3357 spin_unlock_irqrestore(&krcp->lock, flags);
3358 }
3359 }
3360
3361 /*
3362 * During early boot, any blocking grace-period wait automatically
3363 * implies a grace period. Later on, this is never the case for PREEMPTION.
3364 *
3365 * However, because a context switch is a grace period for !PREEMPTION, any
3366 * blocking grace-period wait automatically implies a grace period if
3367 * there is only one CPU online at any point in time during execution of
3368 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
3369 * occasionally incorrectly indicate that there are multiple CPUs online
3370 * when there was in fact only one the whole time, as this just adds some
3371 * overhead: RCU still operates correctly.
3372 */
3373 static int rcu_blocking_is_gp(void)
3374 {
3375 int ret;
3376
3377 if (IS_ENABLED(CONFIG_PREEMPTION))
3378 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3379 might_sleep(); /* Check for RCU read-side critical section.
*/ 3380 preempt_disable(); 3381 ret = num_online_cpus() <= 1; 3382 preempt_enable(); 3383 return ret; 3384 } 3385 3386 /** 3387 * synchronize_rcu - wait until a grace period has elapsed. 3388 * 3389 * Control will return to the caller some time after a full grace 3390 * period has elapsed, in other words after all currently executing RCU 3391 * read-side critical sections have completed. Note, however, that 3392 * upon return from synchronize_rcu(), the caller might well be executing 3393 * concurrently with new RCU read-side critical sections that began while 3394 * synchronize_rcu() was waiting. RCU read-side critical sections are 3395 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested. 3396 * In addition, regions of code across which interrupts, preemption, or 3397 * softirqs have been disabled also serve as RCU read-side critical 3398 * sections. This includes hardware interrupt handlers, softirq handlers, 3399 * and NMI handlers. 3400 * 3401 * Note that this guarantee implies further memory-ordering guarantees. 3402 * On systems with more than one CPU, when synchronize_rcu() returns, 3403 * each CPU is guaranteed to have executed a full memory barrier since 3404 * the end of its last RCU read-side critical section whose beginning 3405 * preceded the call to synchronize_rcu(). In addition, each CPU having 3406 * an RCU read-side critical section that extends beyond the return from 3407 * synchronize_rcu() is guaranteed to have executed a full memory barrier 3408 * after the beginning of synchronize_rcu() and before the beginning of 3409 * that RCU read-side critical section. Note that these guarantees include 3410 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 3411 * that are executing in the kernel. 3412 * 3413 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 3414 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3415 * to have executed a full memory barrier during the execution of 3416 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 3417 * again only if the system has more than one CPU). 3418 */ 3419 void synchronize_rcu(void) 3420 { 3421 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3422 lock_is_held(&rcu_lock_map) || 3423 lock_is_held(&rcu_sched_lock_map), 3424 "Illegal synchronize_rcu() in RCU read-side critical section"); 3425 if (rcu_blocking_is_gp()) 3426 return; 3427 if (rcu_gp_is_expedited()) 3428 synchronize_rcu_expedited(); 3429 else 3430 wait_rcu_gp(call_rcu); 3431 } 3432 EXPORT_SYMBOL_GPL(synchronize_rcu); 3433 3434 /** 3435 * get_state_synchronize_rcu - Snapshot current RCU state 3436 * 3437 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3438 * to determine whether or not a full grace period has elapsed in the 3439 * meantime. 3440 */ 3441 unsigned long get_state_synchronize_rcu(void) 3442 { 3443 /* 3444 * Any prior manipulation of RCU-protected data must happen 3445 * before the load from ->gp_seq. 3446 */ 3447 smp_mb(); /* ^^^ */ 3448 return rcu_seq_snap(&rcu_state.gp_seq); 3449 } 3450 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3451 3452 /** 3453 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3454 * 3455 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 3456 * 3457 * If a full RCU grace period has elapsed since the earlier call to 3458 * get_state_synchronize_rcu(), just return. Otherwise, invoke 3459 * synchronize_rcu() to wait for a full grace period. 
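 *
 * A typical (purely illustrative) usage pattern, with do_something()
 * standing in for caller-provided work that might span a grace period:
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_rcu();
 *	do_something();
 *	cond_synchronize_rcu(cookie);	// Blocks only if no full grace
 *					// period elapsed since the snapshot.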
3460 * 3461 * Yes, this function does not take counter wrap into account. But 3462 * counter wrap is harmless. If the counter wraps, we have waited for 3463 * more than 2 billion grace periods (and way more on a 64-bit system!), 3464 * so waiting for one additional grace period should be just fine. 3465 */ 3466 void cond_synchronize_rcu(unsigned long oldstate) 3467 { 3468 if (!rcu_seq_done(&rcu_state.gp_seq, oldstate)) 3469 synchronize_rcu(); 3470 else 3471 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3472 } 3473 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3474 3475 /* 3476 * Check to see if there is any immediate RCU-related work to be done by 3477 * the current CPU, returning 1 if so and zero otherwise. The checks are 3478 * in order of increasing expense: checks that can be carried out against 3479 * CPU-local state are performed first. However, we must check for CPU 3480 * stalls first, else we might not get a chance. 3481 */ 3482 static int rcu_pending(int user) 3483 { 3484 bool gp_in_progress; 3485 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 3486 struct rcu_node *rnp = rdp->mynode; 3487 3488 /* Check for CPU stalls, if enabled. */ 3489 check_cpu_stall(rdp); 3490 3491 /* Does this CPU need a deferred NOCB wakeup? */ 3492 if (rcu_nocb_need_deferred_wakeup(rdp)) 3493 return 1; 3494 3495 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 3496 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu()) 3497 return 0; 3498 3499 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3500 gp_in_progress = rcu_gp_in_progress(); 3501 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 3502 return 1; 3503 3504 /* Does this CPU have callbacks ready to invoke? */ 3505 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 3506 return 1; 3507 3508 /* Has RCU gone idle with this CPU needing another grace period? */ 3509 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 3510 (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) || 3511 !rcu_segcblist_is_offloaded(&rdp->cblist)) && 3512 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3513 return 1; 3514 3515 /* Have RCU grace period completed or started? */ 3516 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 3517 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3518 return 1; 3519 3520 /* nothing to do */ 3521 return 0; 3522 } 3523 3524 /* 3525 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3526 * the compiler is expected to optimize this away. 3527 */ 3528 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 3529 { 3530 trace_rcu_barrier(rcu_state.name, s, cpu, 3531 atomic_read(&rcu_state.barrier_cpu_count), done); 3532 } 3533 3534 /* 3535 * RCU callback function for rcu_barrier(). If we are last, wake 3536 * up the task executing rcu_barrier(). 3537 * 3538 * Note that the value of rcu_state.barrier_sequence must be captured 3539 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 3540 * other CPUs might count the value down to zero before this CPU gets 3541 * around to invoking rcu_barrier_trace(), which might result in bogus 3542 * data from the next instance of rcu_barrier(). 
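 *
 * Concretely (an illustrative interleaving): this CPU's callback might
 * decrement the count from 2 to 1, another CPU's callback might then drop
 * it to 0 and complete rcu_state.barrier_completion, and the rcu_barrier()
 * task might end this barrier sequence (or even start the next one), all
 * before this CPU reaches its rcu_barrier_trace() call, which would then
 * log a sequence number belonging to the wrong rcu_barrier() instance.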
3543 */ 3544 static void rcu_barrier_callback(struct rcu_head *rhp) 3545 { 3546 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 3547 3548 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 3549 rcu_barrier_trace(TPS("LastCB"), -1, s); 3550 complete(&rcu_state.barrier_completion); 3551 } else { 3552 rcu_barrier_trace(TPS("CB"), -1, s); 3553 } 3554 } 3555 3556 /* 3557 * Called with preemption disabled, and from cross-cpu IRQ context. 3558 */ 3559 static void rcu_barrier_func(void *cpu_in) 3560 { 3561 uintptr_t cpu = (uintptr_t)cpu_in; 3562 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3563 3564 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 3565 rdp->barrier_head.func = rcu_barrier_callback; 3566 debug_rcu_head_queue(&rdp->barrier_head); 3567 rcu_nocb_lock(rdp); 3568 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); 3569 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 3570 atomic_inc(&rcu_state.barrier_cpu_count); 3571 } else { 3572 debug_rcu_head_unqueue(&rdp->barrier_head); 3573 rcu_barrier_trace(TPS("IRQNQ"), -1, 3574 rcu_state.barrier_sequence); 3575 } 3576 rcu_nocb_unlock(rdp); 3577 } 3578 3579 /** 3580 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 3581 * 3582 * Note that this primitive does not necessarily wait for an RCU grace period 3583 * to complete. For example, if there are no RCU callbacks queued anywhere 3584 * in the system, then rcu_barrier() is within its rights to return 3585 * immediately, without waiting for anything, much less an RCU grace period. 3586 */ 3587 void rcu_barrier(void) 3588 { 3589 uintptr_t cpu; 3590 struct rcu_data *rdp; 3591 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3592 3593 rcu_barrier_trace(TPS("Begin"), -1, s); 3594 3595 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3596 mutex_lock(&rcu_state.barrier_mutex); 3597 3598 /* Did someone else do our work for us? */ 3599 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 3600 rcu_barrier_trace(TPS("EarlyExit"), -1, 3601 rcu_state.barrier_sequence); 3602 smp_mb(); /* caller's subsequent code after above check. */ 3603 mutex_unlock(&rcu_state.barrier_mutex); 3604 return; 3605 } 3606 3607 /* Mark the start of the barrier operation. */ 3608 rcu_seq_start(&rcu_state.barrier_sequence); 3609 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 3610 3611 /* 3612 * Initialize the count to two rather than to zero in order 3613 * to avoid a too-soon return to zero in case of an immediate 3614 * invocation of the just-enqueued callback (or preemption of 3615 * this task). Exclude CPU-hotplug operations to ensure that no 3616 * offline non-offloaded CPU has callbacks queued. 3617 */ 3618 init_completion(&rcu_state.barrier_completion); 3619 atomic_set(&rcu_state.barrier_cpu_count, 2); 3620 get_online_cpus(); 3621 3622 /* 3623 * Force each CPU with callbacks to register a new callback. 3624 * When that callback is invoked, we will know that all of the 3625 * corresponding CPU's preceding callbacks have been invoked. 
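	 *
	 * (For reference, this entrain-and-count scheme is what backs the
	 * classic module-unload idiom: once a module has stopped posting
	 * new call_rcu() callbacks, a single rcu_barrier() call ensures
	 * that all of its previously posted callbacks have finished
	 * running before the module's code and data are freed.)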
3626 */ 3627 for_each_possible_cpu(cpu) { 3628 rdp = per_cpu_ptr(&rcu_data, cpu); 3629 if (cpu_is_offline(cpu) && 3630 !rcu_segcblist_is_offloaded(&rdp->cblist)) 3631 continue; 3632 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) { 3633 rcu_barrier_trace(TPS("OnlineQ"), cpu, 3634 rcu_state.barrier_sequence); 3635 smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1); 3636 } else if (rcu_segcblist_n_cbs(&rdp->cblist) && 3637 cpu_is_offline(cpu)) { 3638 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, 3639 rcu_state.barrier_sequence); 3640 local_irq_disable(); 3641 rcu_barrier_func((void *)cpu); 3642 local_irq_enable(); 3643 } else if (cpu_is_offline(cpu)) { 3644 rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu, 3645 rcu_state.barrier_sequence); 3646 } else { 3647 rcu_barrier_trace(TPS("OnlineNQ"), cpu, 3648 rcu_state.barrier_sequence); 3649 } 3650 } 3651 put_online_cpus(); 3652 3653 /* 3654 * Now that we have an rcu_barrier_callback() callback on each 3655 * CPU, and thus each counted, remove the initial count. 3656 */ 3657 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) 3658 complete(&rcu_state.barrier_completion); 3659 3660 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 3661 wait_for_completion(&rcu_state.barrier_completion); 3662 3663 /* Mark the end of the barrier operation. */ 3664 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 3665 rcu_seq_end(&rcu_state.barrier_sequence); 3666 3667 /* Other rcu_barrier() invocations can now safely proceed. */ 3668 mutex_unlock(&rcu_state.barrier_mutex); 3669 } 3670 EXPORT_SYMBOL_GPL(rcu_barrier); 3671 3672 /* 3673 * Propagate ->qsinitmask bits up the rcu_node tree to account for the 3674 * first CPU in a given leaf rcu_node structure coming online. The caller 3675 * must hold the corresponding leaf rcu_node ->lock with interrrupts 3676 * disabled. 3677 */ 3678 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) 3679 { 3680 long mask; 3681 long oldmask; 3682 struct rcu_node *rnp = rnp_leaf; 3683 3684 raw_lockdep_assert_held_rcu_node(rnp_leaf); 3685 WARN_ON_ONCE(rnp->wait_blkd_tasks); 3686 for (;;) { 3687 mask = rnp->grpmask; 3688 rnp = rnp->parent; 3689 if (rnp == NULL) 3690 return; 3691 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ 3692 oldmask = rnp->qsmaskinit; 3693 rnp->qsmaskinit |= mask; 3694 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ 3695 if (oldmask) 3696 return; 3697 } 3698 } 3699 3700 /* 3701 * Do boot-time initialization of a CPU's per-CPU RCU data. 3702 */ 3703 static void __init 3704 rcu_boot_init_percpu_data(int cpu) 3705 { 3706 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3707 3708 /* Set up local state, ensuring consistent view of global state. */ 3709 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); 3710 WARN_ON_ONCE(rdp->dynticks_nesting != 1); 3711 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); 3712 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; 3713 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; 3714 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; 3715 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; 3716 rdp->cpu = cpu; 3717 rcu_boot_init_nocb_percpu_data(rdp); 3718 } 3719 3720 /* 3721 * Invoked early in the CPU-online process, when pretty much all services 3722 * are available. The incoming CPU is not present. 3723 * 3724 * Initializes a CPU's per-CPU RCU data. Note that only one online or 3725 * offline event can be happening at a given time. 
Note also that we can 3726 * accept some slop in the rsp->gp_seq access due to the fact that this 3727 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 3728 * And any offloaded callbacks are being numbered elsewhere. 3729 */ 3730 int rcutree_prepare_cpu(unsigned int cpu) 3731 { 3732 unsigned long flags; 3733 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3734 struct rcu_node *rnp = rcu_get_root(); 3735 3736 /* Set up local state, ensuring consistent view of global state. */ 3737 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3738 rdp->qlen_last_fqs_check = 0; 3739 rdp->n_force_qs_snap = rcu_state.n_force_qs; 3740 rdp->blimit = blimit; 3741 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ 3742 !rcu_segcblist_is_offloaded(&rdp->cblist)) 3743 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 3744 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ 3745 rcu_dynticks_eqs_online(); 3746 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 3747 3748 /* 3749 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 3750 * propagation up the rcu_node tree will happen at the beginning 3751 * of the next grace period. 3752 */ 3753 rnp = rdp->mynode; 3754 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 3755 rdp->beenonline = true; /* We have now been online. */ 3756 rdp->gp_seq = READ_ONCE(rnp->gp_seq); 3757 rdp->gp_seq_needed = rdp->gp_seq; 3758 rdp->cpu_no_qs.b.norm = true; 3759 rdp->core_needs_qs = false; 3760 rdp->rcu_iw_pending = false; 3761 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 3762 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 3763 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3764 rcu_prepare_kthreads(cpu); 3765 rcu_spawn_cpu_nocb_kthread(cpu); 3766 3767 return 0; 3768 } 3769 3770 /* 3771 * Update RCU priority boot kthread affinity for CPU-hotplug changes. 3772 */ 3773 static void rcutree_affinity_setting(unsigned int cpu, int outgoing) 3774 { 3775 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3776 3777 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); 3778 } 3779 3780 /* 3781 * Near the end of the CPU-online process. Pretty much all services 3782 * enabled, and the CPU is now very much alive. 3783 */ 3784 int rcutree_online_cpu(unsigned int cpu) 3785 { 3786 unsigned long flags; 3787 struct rcu_data *rdp; 3788 struct rcu_node *rnp; 3789 3790 rdp = per_cpu_ptr(&rcu_data, cpu); 3791 rnp = rdp->mynode; 3792 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3793 rnp->ffmask |= rdp->grpmask; 3794 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3795 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 3796 return 0; /* Too early in boot for scheduler work. */ 3797 sync_sched_exp_online_cleanup(cpu); 3798 rcutree_affinity_setting(cpu, -1); 3799 3800 // Stop-machine done, so allow nohz_full to disable tick. 3801 tick_dep_clear(TICK_DEP_BIT_RCU); 3802 return 0; 3803 } 3804 3805 /* 3806 * Near the beginning of the process. The CPU is still very much alive 3807 * with pretty much all services enabled. 
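 *
 * This function clears the outgoing CPU's bit in its leaf rcu_node
 * structure's ->ffmask, re-runs the boost-kthread affinity setting with
 * this CPU excluded, and sets the TICK_DEP_BIT_RCU tick dependency so
 * that nohz_full CPUs keep their scheduling-clock tick and the
 * stop-machine phase of the offline operation completes quickly.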
3808 */ 3809 int rcutree_offline_cpu(unsigned int cpu) 3810 { 3811 unsigned long flags; 3812 struct rcu_data *rdp; 3813 struct rcu_node *rnp; 3814 3815 rdp = per_cpu_ptr(&rcu_data, cpu); 3816 rnp = rdp->mynode; 3817 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3818 rnp->ffmask &= ~rdp->grpmask; 3819 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3820 3821 rcutree_affinity_setting(cpu, cpu); 3822 3823 // nohz_full CPUs need the tick for stop-machine to work quickly 3824 tick_dep_set(TICK_DEP_BIT_RCU); 3825 return 0; 3826 } 3827 3828 static DEFINE_PER_CPU(int, rcu_cpu_started); 3829 3830 /* 3831 * Mark the specified CPU as being online so that subsequent grace periods 3832 * (both expedited and normal) will wait on it. Note that this means that 3833 * incoming CPUs are not allowed to use RCU read-side critical sections 3834 * until this function is called. Failing to observe this restriction 3835 * will result in lockdep splats. 3836 * 3837 * Note that this function is special in that it is invoked directly 3838 * from the incoming CPU rather than from the cpuhp_step mechanism. 3839 * This is because this function must be invoked at a precise location. 3840 */ 3841 void rcu_cpu_starting(unsigned int cpu) 3842 { 3843 unsigned long flags; 3844 unsigned long mask; 3845 int nbits; 3846 unsigned long oldmask; 3847 struct rcu_data *rdp; 3848 struct rcu_node *rnp; 3849 3850 if (per_cpu(rcu_cpu_started, cpu)) 3851 return; 3852 3853 per_cpu(rcu_cpu_started, cpu) = 1; 3854 3855 rdp = per_cpu_ptr(&rcu_data, cpu); 3856 rnp = rdp->mynode; 3857 mask = rdp->grpmask; 3858 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3859 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); 3860 oldmask = rnp->expmaskinitnext; 3861 rnp->expmaskinitnext |= mask; 3862 oldmask ^= rnp->expmaskinitnext; 3863 nbits = bitmap_weight(&oldmask, BITS_PER_LONG); 3864 /* Allow lockless access for expedited grace periods. */ 3865 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */ 3866 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus); 3867 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ 3868 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); 3869 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); 3870 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */ 3871 rcu_disable_urgency_upon_qs(rdp); 3872 /* Report QS -after- changing ->qsmaskinitnext! */ 3873 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 3874 } else { 3875 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3876 } 3877 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 3878 } 3879 3880 #ifdef CONFIG_HOTPLUG_CPU 3881 /* 3882 * The outgoing function has no further need of RCU, so remove it from 3883 * the rcu_node tree's ->qsmaskinitnext bit masks. 3884 * 3885 * Note that this function is special in that it is invoked directly 3886 * from the outgoing CPU rather than from the cpuhp_step mechanism. 3887 * This is because this function must be invoked at a precise location. 3888 */ 3889 void rcu_report_dead(unsigned int cpu) 3890 { 3891 unsigned long flags; 3892 unsigned long mask; 3893 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3894 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 3895 3896 /* QS for any half-done expedited grace period. */ 3897 preempt_disable(); 3898 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); 3899 preempt_enable(); 3900 rcu_preempt_deferred_qs(current); 3901 3902 /* Remove outgoing CPU from mask in the leaf rcu_node structure. 
*/ 3903 mask = rdp->grpmask; 3904 raw_spin_lock(&rcu_state.ofl_lock); 3905 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 3906 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 3907 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); 3908 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 3909 /* Report quiescent state -before- changing ->qsmaskinitnext! */ 3910 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 3911 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3912 } 3913 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); 3914 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3915 raw_spin_unlock(&rcu_state.ofl_lock); 3916 3917 per_cpu(rcu_cpu_started, cpu) = 0; 3918 } 3919 3920 /* 3921 * The outgoing CPU has just passed through the dying-idle state, and we 3922 * are being invoked from the CPU that was IPIed to continue the offline 3923 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 3924 */ 3925 void rcutree_migrate_callbacks(int cpu) 3926 { 3927 unsigned long flags; 3928 struct rcu_data *my_rdp; 3929 struct rcu_node *my_rnp; 3930 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3931 bool needwake; 3932 3933 if (rcu_segcblist_is_offloaded(&rdp->cblist) || 3934 rcu_segcblist_empty(&rdp->cblist)) 3935 return; /* No callbacks to migrate. */ 3936 3937 local_irq_save(flags); 3938 my_rdp = this_cpu_ptr(&rcu_data); 3939 my_rnp = my_rdp->mynode; 3940 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 3941 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies)); 3942 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 3943 /* Leverage recent GPs and set GP for new callbacks. */ 3944 needwake = rcu_advance_cbs(my_rnp, rdp) || 3945 rcu_advance_cbs(my_rnp, my_rdp); 3946 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 3947 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 3948 rcu_segcblist_disable(&rdp->cblist); 3949 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != 3950 !rcu_segcblist_n_cbs(&my_rdp->cblist)); 3951 if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) { 3952 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 3953 __call_rcu_nocb_wake(my_rdp, true, flags); 3954 } else { 3955 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ 3956 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags); 3957 } 3958 if (needwake) 3959 rcu_gp_kthread_wake(); 3960 lockdep_assert_irqs_enabled(); 3961 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 3962 !rcu_segcblist_empty(&rdp->cblist), 3963 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 3964 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 3965 rcu_segcblist_first_cb(&rdp->cblist)); 3966 } 3967 #endif 3968 3969 /* 3970 * On non-huge systems, use expedited RCU grace periods to make suspend 3971 * and hibernation run faster. 3972 */ 3973 static int rcu_pm_notify(struct notifier_block *self, 3974 unsigned long action, void *hcpu) 3975 { 3976 switch (action) { 3977 case PM_HIBERNATION_PREPARE: 3978 case PM_SUSPEND_PREPARE: 3979 rcu_expedite_gp(); 3980 break; 3981 case PM_POST_HIBERNATION: 3982 case PM_POST_SUSPEND: 3983 rcu_unexpedite_gp(); 3984 break; 3985 default: 3986 break; 3987 } 3988 return NOTIFY_OK; 3989 } 3990 3991 /* 3992 * Spawn the kthreads that handle RCU's grace periods. 
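 *
 * The rcutree.kthread_prio module parameter is clamped into a usable
 * SCHED_FIFO range below: for example, a boot-time value of 150 is
 * limited to 99 and a negative value is raised to 0, with a minimum of
 * 1 (or 2 when rcutorture is built in) enforced when RCU priority
 * boosting is configured.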
3993 */ 3994 static int __init rcu_spawn_gp_kthread(void) 3995 { 3996 unsigned long flags; 3997 int kthread_prio_in = kthread_prio; 3998 struct rcu_node *rnp; 3999 struct sched_param sp; 4000 struct task_struct *t; 4001 4002 /* Force priority into range. */ 4003 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2 4004 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) 4005 kthread_prio = 2; 4006 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 4007 kthread_prio = 1; 4008 else if (kthread_prio < 0) 4009 kthread_prio = 0; 4010 else if (kthread_prio > 99) 4011 kthread_prio = 99; 4012 4013 if (kthread_prio != kthread_prio_in) 4014 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", 4015 kthread_prio, kthread_prio_in); 4016 4017 rcu_scheduler_fully_active = 1; 4018 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 4019 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 4020 return 0; 4021 if (kthread_prio) { 4022 sp.sched_priority = kthread_prio; 4023 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 4024 } 4025 rnp = rcu_get_root(); 4026 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4027 WRITE_ONCE(rcu_state.gp_activity, jiffies); 4028 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 4029 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread. 4030 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ 4031 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4032 wake_up_process(t); 4033 rcu_spawn_nocb_kthreads(); 4034 rcu_spawn_boost_kthreads(); 4035 return 0; 4036 } 4037 early_initcall(rcu_spawn_gp_kthread); 4038 4039 /* 4040 * This function is invoked towards the end of the scheduler's 4041 * initialization process. Before this is called, the idle task might 4042 * contain synchronous grace-period primitives (during which time, this idle 4043 * task is booting the system, and such primitives are no-ops). After this 4044 * function is called, any synchronous grace-period primitives are run as 4045 * expedited, with the requesting task driving the grace period forward. 4046 * A later core_initcall() rcu_set_runtime_mode() will switch to full 4047 * runtime RCU functionality. 4048 */ 4049 void rcu_scheduler_starting(void) 4050 { 4051 WARN_ON(num_online_cpus() != 1); 4052 WARN_ON(nr_context_switches() > 0); 4053 rcu_test_sync_prims(); 4054 rcu_scheduler_active = RCU_SCHEDULER_INIT; 4055 rcu_test_sync_prims(); 4056 } 4057 4058 /* 4059 * Helper function for rcu_init() that initializes the rcu_state structure. 4060 */ 4061 static void __init rcu_init_one(void) 4062 { 4063 static const char * const buf[] = RCU_NODE_NAME_INIT; 4064 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4065 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 4066 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 4067 4068 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 4069 int cpustride = 1; 4070 int i; 4071 int j; 4072 struct rcu_node *rnp; 4073 4074 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 4075 4076 /* Silence gcc 4.8 false positive about array index out of range. */ 4077 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 4078 panic("rcu_init_one: rcu_num_lvls out of range"); 4079 4080 /* Initialize the level-tracking arrays. 
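Each rcu_state.level[i] entry is left pointing at the
first rcu_node structure of tree level i within the flat ->node[] array.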
*/ 4081 4082 for (i = 1; i < rcu_num_lvls; i++) 4083 rcu_state.level[i] = 4084 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; 4085 rcu_init_levelspread(levelspread, num_rcu_lvl); 4086 4087 /* Initialize the elements themselves, starting from the leaves. */ 4088 4089 for (i = rcu_num_lvls - 1; i >= 0; i--) { 4090 cpustride *= levelspread[i]; 4091 rnp = rcu_state.level[i]; 4092 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { 4093 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); 4094 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), 4095 &rcu_node_class[i], buf[i]); 4096 raw_spin_lock_init(&rnp->fqslock); 4097 lockdep_set_class_and_name(&rnp->fqslock, 4098 &rcu_fqs_class[i], fqs[i]); 4099 rnp->gp_seq = rcu_state.gp_seq; 4100 rnp->gp_seq_needed = rcu_state.gp_seq; 4101 rnp->completedqs = rcu_state.gp_seq; 4102 rnp->qsmask = 0; 4103 rnp->qsmaskinit = 0; 4104 rnp->grplo = j * cpustride; 4105 rnp->grphi = (j + 1) * cpustride - 1; 4106 if (rnp->grphi >= nr_cpu_ids) 4107 rnp->grphi = nr_cpu_ids - 1; 4108 if (i == 0) { 4109 rnp->grpnum = 0; 4110 rnp->grpmask = 0; 4111 rnp->parent = NULL; 4112 } else { 4113 rnp->grpnum = j % levelspread[i - 1]; 4114 rnp->grpmask = BIT(rnp->grpnum); 4115 rnp->parent = rcu_state.level[i - 1] + 4116 j / levelspread[i - 1]; 4117 } 4118 rnp->level = i; 4119 INIT_LIST_HEAD(&rnp->blkd_tasks); 4120 rcu_init_one_nocb(rnp); 4121 init_waitqueue_head(&rnp->exp_wq[0]); 4122 init_waitqueue_head(&rnp->exp_wq[1]); 4123 init_waitqueue_head(&rnp->exp_wq[2]); 4124 init_waitqueue_head(&rnp->exp_wq[3]); 4125 spin_lock_init(&rnp->exp_lock); 4126 } 4127 } 4128 4129 init_swait_queue_head(&rcu_state.gp_wq); 4130 init_swait_queue_head(&rcu_state.expedited_wq); 4131 rnp = rcu_first_leaf_node(); 4132 for_each_possible_cpu(i) { 4133 while (i > rnp->grphi) 4134 rnp++; 4135 per_cpu_ptr(&rcu_data, i)->mynode = rnp; 4136 rcu_boot_init_percpu_data(i); 4137 } 4138 } 4139 4140 /* 4141 * Compute the rcu_node tree geometry from kernel parameters. This cannot 4142 * replace the definitions in tree.h because those are needed to size 4143 * the ->node array in the rcu_state structure. 4144 */ 4145 static void __init rcu_init_geometry(void) 4146 { 4147 ulong d; 4148 int i; 4149 int rcu_capacity[RCU_NUM_LVLS]; 4150 4151 /* 4152 * Initialize any unspecified boot parameters. 4153 * The default values of jiffies_till_first_fqs and 4154 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS 4155 * value, which is a function of HZ, then adding one for each 4156 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system. 4157 */ 4158 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 4159 if (jiffies_till_first_fqs == ULONG_MAX) 4160 jiffies_till_first_fqs = d; 4161 if (jiffies_till_next_fqs == ULONG_MAX) 4162 jiffies_till_next_fqs = d; 4163 adjust_jiffies_till_sched_qs(); 4164 4165 /* If the compile-time values are accurate, just leave. */ 4166 if (rcu_fanout_leaf == RCU_FANOUT_LEAF && 4167 nr_cpu_ids == NR_CPUS) 4168 return; 4169 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n", 4170 rcu_fanout_leaf, nr_cpu_ids); 4171 4172 /* 4173 * The boot-time rcu_fanout_leaf parameter must be at least two 4174 * and cannot exceed the number of bits in the rcu_node masks. 4175 * Complain and fall back to the compile-time values if this 4176 * limit is exceeded. 
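 *
 * As a worked example of the geometry computed just below (using the
 * illustrative 64-bit defaults rcu_fanout_leaf = 16 and RCU_FANOUT = 64):
 * rcu_capacity[] becomes { 16, 1024, 65536, ... }, so a system with
 * nr_cpu_ids = 6000 ends up with rcu_num_lvls = 3 and
 * num_rcu_lvl[] = { 1, 6, 375 }, for 382 rcu_node structures in total.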
4177 */ 4178 if (rcu_fanout_leaf < 2 || 4179 rcu_fanout_leaf > sizeof(unsigned long) * 8) { 4180 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4181 WARN_ON(1); 4182 return; 4183 } 4184 4185 /* 4186 * Compute number of nodes that can be handled an rcu_node tree 4187 * with the given number of levels. 4188 */ 4189 rcu_capacity[0] = rcu_fanout_leaf; 4190 for (i = 1; i < RCU_NUM_LVLS; i++) 4191 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; 4192 4193 /* 4194 * The tree must be able to accommodate the configured number of CPUs. 4195 * If this limit is exceeded, fall back to the compile-time values. 4196 */ 4197 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { 4198 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4199 WARN_ON(1); 4200 return; 4201 } 4202 4203 /* Calculate the number of levels in the tree. */ 4204 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { 4205 } 4206 rcu_num_lvls = i + 1; 4207 4208 /* Calculate the number of rcu_nodes at each level of the tree. */ 4209 for (i = 0; i < rcu_num_lvls; i++) { 4210 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; 4211 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); 4212 } 4213 4214 /* Calculate the total number of rcu_node structures. */ 4215 rcu_num_nodes = 0; 4216 for (i = 0; i < rcu_num_lvls; i++) 4217 rcu_num_nodes += num_rcu_lvl[i]; 4218 } 4219 4220 /* 4221 * Dump out the structure of the rcu_node combining tree associated 4222 * with the rcu_state structure. 4223 */ 4224 static void __init rcu_dump_rcu_node_tree(void) 4225 { 4226 int level = 0; 4227 struct rcu_node *rnp; 4228 4229 pr_info("rcu_node tree layout dump\n"); 4230 pr_info(" "); 4231 rcu_for_each_node_breadth_first(rnp) { 4232 if (rnp->level != level) { 4233 pr_cont("\n"); 4234 pr_info(" "); 4235 level = rnp->level; 4236 } 4237 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 4238 } 4239 pr_cont("\n"); 4240 } 4241 4242 struct workqueue_struct *rcu_gp_wq; 4243 struct workqueue_struct *rcu_par_gp_wq; 4244 4245 static void __init kfree_rcu_batch_init(void) 4246 { 4247 int cpu; 4248 int i; 4249 4250 for_each_possible_cpu(cpu) { 4251 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 4252 4253 spin_lock_init(&krcp->lock); 4254 for (i = 0; i < KFREE_N_BATCHES; i++) { 4255 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); 4256 krcp->krw_arr[i].krcp = krcp; 4257 } 4258 4259 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); 4260 krcp->initialized = true; 4261 } 4262 if (register_shrinker(&kfree_rcu_shrinker)) 4263 pr_err("Failed to register kfree_rcu() shrinker!\n"); 4264 } 4265 4266 void __init rcu_init(void) 4267 { 4268 int cpu; 4269 4270 rcu_early_boot_tests(); 4271 4272 kfree_rcu_batch_init(); 4273 rcu_bootup_announce(); 4274 rcu_init_geometry(); 4275 rcu_init_one(); 4276 if (dump_tree) 4277 rcu_dump_rcu_node_tree(); 4278 if (use_softirq) 4279 open_softirq(RCU_SOFTIRQ, rcu_core_si); 4280 4281 /* 4282 * We don't need protection against CPU-hotplug here because 4283 * this is called early in boot, before either interrupts 4284 * or the scheduler are operational. 4285 */ 4286 pm_notifier(rcu_pm_notify, 0); 4287 for_each_online_cpu(cpu) { 4288 rcutree_prepare_cpu(cpu); 4289 rcu_cpu_starting(cpu); 4290 rcutree_online_cpu(cpu); 4291 } 4292 4293 /* Create workqueue for expedited GPs and for Tree SRCU. */ 4294 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); 4295 WARN_ON(!rcu_gp_wq); 4296 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); 4297 WARN_ON(!rcu_par_gp_wq); 4298 srcu_init(); 4299 4300 /* Fill in default value for rcutree.qovld boot parameter. 
*/ 4301 /* -After- the rcu_node ->lock fields are initialized! */ 4302 if (qovld < 0) 4303 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark; 4304 else 4305 qovld_calc = qovld; 4306 } 4307 4308 #include "tree_stall.h" 4309 #include "tree_exp.h" 4310 #include "tree_plugin.h" 4311
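/*
 * For reference, a minimal sketch of the classic update-side pattern that
 * synchronize_rcu() above supports; "gp", "gp_lock", and "struct foo" are
 * hypothetical caller-side names:
 *
 *	struct foo *old, *new;
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	// ... initialize *new ...
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);	// Publish the new version.
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();		// Wait out pre-existing readers.
 *	kfree(old);			// Now safe to free the old version.
 */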