// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
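/*
 * The resulting ->dynticks layout is thus (an illustrative sketch derived
 * from the accessors below, not a separate ABI):
 *
 *	bit 0 (RCU_DYNTICK_CTRL_MASK):	special-action request, for example
 *		a deferred TLB flush, consumed and cleared on the next exit
 *		from an extended quiescent state (EQS).
 *	bits 1 and up:	counter, advanced by RCU_DYNTICK_CTRL_CTR on each
 *		EQS entry and each EQS exit.
 *
 * Because the counter advances once on entry and once on exit, its
 * low-order bit (bit 1 of ->dynticks) encodes the current state:
 * nonzero means RCU is watching this CPU, zero means the CPU is in
 * an EQS.
 */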
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = true;
module_param(use_softirq, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This rcu parameter is runtime-read-only.  It reflects
 * a minimum allowed number of objects which can be cached
 * per-CPU.  Object size is equal to one page.  This value
 * can be changed at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
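/*
 * A note on the encoding (see the rcu_seq_*() helpers in rcu.h): the
 * low-order RCU_SEQ_CTR_SHIFT bits of a ->gp_seq value hold the
 * grace-period phase, which is nonzero while a grace period is in
 * progress, and the upper bits count grace periods.  This is also why
 * rcu_state.gp_seq is initialized above to (0UL - 300UL) <<
 * RCU_SEQ_CTR_SHIFT: starting a few hundred grace periods short of
 * counter wrap exercises the wrap-handling code paths soon after boot.
 */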
/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
}

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	int snap = atomic_add_return(0, &rdp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/* Return true if the specified CPU is currently idle from an RCU viewpoint. */
bool rcu_is_idle_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}
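/*
 * Illustration of how the snapshot helpers above combine; this is the
 * pattern used by dyntick_save_progress_counter() and
 * rcu_implicit_dynticks_qs() later in this file:
 *
 *	int snap = rcu_dynticks_snap(rdp);		// at FQS time
 *	...
 *	if (rcu_dynticks_in_eqs(snap) ||		// idle then, or
 *	    rcu_dynticks_in_eqs_since(rdp, snap))	// idle at some point since
 *		...credit rdp->cpu with a quiescent state...
 */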
/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
					       RCU_DYNTICK_CTRL_CTR);

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure;
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	int new_old;
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	new_old = atomic_read(&rdp->dynticks);
	do {
		old = new_old;
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
		new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
	} while (new_old != old);
	return true;
}
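/*
 * Sketch of a hypothetical caller (none exists in this file): a remote
 * TLB-flush scheme could use rcu_eqs_special_set() to avoid IPIing idle
 * CPUs, falling back to an IPI only when the target CPU is not in an
 * extended quiescent state:
 *
 *	if (!rcu_eqs_special_set(cpu))
 *		smp_call_function_single(cpu, do_flush_fn, NULL, 1);
 *	// else: the CPU is idle and will act on the special bit when it
 *	// exits its extended quiescent state.
 *
 * Here do_flush_fn is a made-up handler name used only for illustration.
 */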
/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
	int special;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
				    &this_cpu_ptr(&rcu_data)->dynticks);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly, from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_function_call()
	 * for expedited grace periods.  This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}
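/*
 * Worked example of the two nesting counters consulted above, with
 * values as maintained by rcu_eqs_enter(), rcu_eqs_exit(),
 * rcu_nmi_enter() and rcu_nmi_exit() elsewhere in this file:
 *
 *	context				->dynticks_nesting	->dynticks_nmi_nesting
 *	idle loop			0			0
 *	first irq/NMI from idle		0			1
 *	nested NMI on top of that	0			3
 *	task running in the kernel	1			DYNTICK_IRQ_NONIDLE
 *
 * Hence the checks above: at most one interrupt level together with a
 * zero ->dynticks_nesting means "interrupted directly from idle".
 */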
#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
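/*
 * Given the "rcutree." MODULE_PARAM_PREFIX defined above, these knobs
 * are set from the kernel command line, for example (illustrative
 * values, not defaults):
 *
 *	rcutree.jiffies_till_first_fqs=50 rcutree.jiffies_till_next_fqs=20
 *
 * The setters clamp jiffies_till_first_fqs to at most HZ and
 * jiffies_till_next_fqs to the range [1, HZ], then re-run
 * adjust_jiffies_till_sched_qs().  With the values above and HZ=1000,
 * that function would compute 50 + 2 * 20 = 90 jiffies, which would then
 * be raised to the HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV floor.
 */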
static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rdp = this_cpu_ptr(&rcu_data);
	do_nocb_deferred_wakeup(rdp);
	rcu_prepare_for_idle();
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (!in_nmi())
		rcu_prepare_for_idle();

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}

/**
 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
 *			  toward in-kernel preemption
 *
 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
 * from RCU point of view.  Invoked from return from interrupt before kernel
 * preemption.
 */
void rcu_irq_exit_preempt(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (!in_nmi()) {
			instrumentation_begin();
			rcu_cleanup_after_idle();
			instrumentation_end();
		}

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}
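/*
 * Example of the resulting ->dynticks_nmi_nesting values, combining the
 * incby logic above with the matching decrements in rcu_nmi_exit():
 *
 *	idle (extended quiescent state)		0
 *	NMI #1 arrives from idle		1	(incby = 1, EQS exited)
 *	NMI #2 nests inside NMI #1		3	(incby = 2)
 *	NMI #2 returns				1
 *	NMI #1 returns				0	(EQS re-entered)
 *
 * A value of exactly 1 therefore identifies the outermost handler that
 * interrupted an RCU-idle period, as the comment above notes.
 */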
/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
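/*
 * Illustration of the checks above: both comparisons use unsigned
 * wraparound arithmetic via ULONG_CMP_LT().  If the CPU has slept so
 * long that rnp->gp_seq has advanced more than ULONG_MAX / 4 beyond the
 * rcu_data copy, ->gpwrap is set, invalidating the stale per-CPU state
 * well before the counter could wrap all the way around and appear
 * current again.
 */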
/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state.  After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline.  This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
		bool onl;
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(*ruqp, true);
	}
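	/*
	 * Timeline of the escalation above for a holdout CPU, with
	 * jtsq = jiffies_to_sched_qs and t0 = rcu_state.gp_start:
	 *
	 *	t0 + jtsq:	set .rcu_urgent_qs only.
	 *	t0 + 2*jtsq:	set .rcu_need_heavy_qs as well (also forced
	 *			earlier by .jiffies_resched or callback
	 *			overload).
	 *
	 * The code below escalates further, to resched_cpu() and irq-work
	 * pokes, if the CPU still fails to report a quiescent state.
	 */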
	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(*ruqp, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}

/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP thread needs to be awakened else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}
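/*
 * Shape of the funnel-locking traversal above for a three-level tree
 * (illustrative):
 *
 *	rnp_start (leaf, lock held by caller)
 *	  -> parent (acquire, record ->gp_seq_needed or bail, release)
 *	    -> root (acquire, start grace period if none in progress)
 *
 * Each level bails out early once the requested grace period is already
 * recorded or started, so root-lock contention is limited to roughly the
 * first requester of each new grace period.
 */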
/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_irq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));

	return ret;
}

/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
*/ 1538 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1539 return; 1540 } 1541 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1542 needwake = rcu_accelerate_cbs(rnp, rdp); 1543 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1544 if (needwake) 1545 rcu_gp_kthread_wake(); 1546 } 1547 1548 /* 1549 * Move any callbacks whose grace period has completed to the 1550 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1551 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1552 * sublist. This function is idempotent, so it does not hurt to 1553 * invoke it repeatedly. As long as it is not invoked -too- often... 1554 * Returns true if the RCU grace-period kthread needs to be awakened. 1555 * 1556 * The caller must hold rnp->lock with interrupts disabled. 1557 */ 1558 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1559 { 1560 rcu_lockdep_assert_cblist_protected(rdp); 1561 raw_lockdep_assert_held_rcu_node(rnp); 1562 1563 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1564 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1565 return false; 1566 1567 /* 1568 * Find all callbacks whose ->gp_seq numbers indicate that they 1569 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1570 */ 1571 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1572 1573 /* Classify any remaining callbacks. */ 1574 return rcu_accelerate_cbs(rnp, rdp); 1575 } 1576 1577 /* 1578 * Move and classify callbacks, but only if doing so won't require 1579 * that the RCU grace-period kthread be awakened. 1580 */ 1581 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1582 struct rcu_data *rdp) 1583 { 1584 rcu_lockdep_assert_cblist_protected(rdp); 1585 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || 1586 !raw_spin_trylock_rcu_node(rnp)) 1587 return; 1588 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1589 raw_spin_unlock_rcu_node(rnp); 1590 } 1591 1592 /* 1593 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a 1594 * quiescent state. This is intended to be invoked when the CPU notices 1595 * a new grace period. 1596 */ 1597 static void rcu_strict_gp_check_qs(void) 1598 { 1599 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) { 1600 rcu_read_lock(); 1601 rcu_read_unlock(); 1602 } 1603 } 1604 1605 /* 1606 * Update CPU-local rcu_data state to record the beginnings and ends of 1607 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1608 * structure corresponding to the current CPU, and must have irqs disabled. 1609 * Returns true if the grace-period kthread needs to be awakened. 1610 */ 1611 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1612 { 1613 bool ret = false; 1614 bool need_qs; 1615 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); 1616 1617 raw_lockdep_assert_held_rcu_node(rnp); 1618 1619 if (rdp->gp_seq == rnp->gp_seq) 1620 return false; /* Nothing to do. */ 1621 1622 /* Handle the ends of any preceding grace periods first. */ 1623 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1624 unlikely(READ_ONCE(rdp->gpwrap))) { 1625 if (!offloaded) 1626 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ 1627 rdp->core_needs_qs = false; 1628 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1629 } else { 1630 if (!offloaded) 1631 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. 
*/ 1632 if (rdp->core_needs_qs) 1633 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); 1634 } 1635 1636 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ 1637 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1638 unlikely(READ_ONCE(rdp->gpwrap))) { 1639 /* 1640 * If the current grace period is waiting for this CPU, 1641 * set up to detect a quiescent state, otherwise don't 1642 * go looking for one. 1643 */ 1644 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1645 need_qs = !!(rnp->qsmask & rdp->grpmask); 1646 rdp->cpu_no_qs.b.norm = need_qs; 1647 rdp->core_needs_qs = need_qs; 1648 zero_cpu_stall_ticks(rdp); 1649 } 1650 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1651 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1652 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1653 WRITE_ONCE(rdp->gpwrap, false); 1654 rcu_gpnum_ovf(rnp, rdp); 1655 return ret; 1656 } 1657 1658 static void note_gp_changes(struct rcu_data *rdp) 1659 { 1660 unsigned long flags; 1661 bool needwake; 1662 struct rcu_node *rnp; 1663 1664 local_irq_save(flags); 1665 rnp = rdp->mynode; 1666 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1667 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1668 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ 1669 local_irq_restore(flags); 1670 return; 1671 } 1672 needwake = __note_gp_changes(rnp, rdp); 1673 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1674 rcu_strict_gp_check_qs(); 1675 if (needwake) 1676 rcu_gp_kthread_wake(); 1677 } 1678 1679 static void rcu_gp_slow(int delay) 1680 { 1681 if (delay > 0 && 1682 !(rcu_seq_ctr(rcu_state.gp_seq) % 1683 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1684 schedule_timeout_idle(delay); 1685 } 1686 1687 static unsigned long sleep_duration; 1688 1689 /* Allow rcutorture to stall the grace-period kthread. */ 1690 void rcu_gp_set_torture_wait(int duration) 1691 { 1692 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0) 1693 WRITE_ONCE(sleep_duration, duration); 1694 } 1695 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait); 1696 1697 /* Actually implement the aforementioned wait. */ 1698 static void rcu_gp_torture_wait(void) 1699 { 1700 unsigned long duration; 1701 1702 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST)) 1703 return; 1704 duration = xchg(&sleep_duration, 0UL); 1705 if (duration > 0) { 1706 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration); 1707 schedule_timeout_idle(duration); 1708 pr_alert("%s: Wait complete\n", __func__); 1709 } 1710 } 1711 1712 /* 1713 * Handler for on_each_cpu() to invoke the target CPU's RCU core 1714 * processing. 1715 */ 1716 static void rcu_strict_gp_boundary(void *unused) 1717 { 1718 invoke_rcu_core(); 1719 } 1720 1721 /* 1722 * Initialize a new grace period. Return false if no grace period required. 1723 */ 1724 static bool rcu_gp_init(void) 1725 { 1726 unsigned long firstseq; 1727 unsigned long flags; 1728 unsigned long oldmask; 1729 unsigned long mask; 1730 struct rcu_data *rdp; 1731 struct rcu_node *rnp = rcu_get_root(); 1732 1733 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1734 raw_spin_lock_irq_rcu_node(rnp); 1735 if (!READ_ONCE(rcu_state.gp_flags)) { 1736 /* Spurious wakeup, tell caller to go back to sleep. */ 1737 raw_spin_unlock_irq_rcu_node(rnp); 1738 return false; 1739 } 1740 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ 1741 1742 if (WARN_ON_ONCE(rcu_gp_in_progress())) { 1743 /* 1744 * Grace period already in progress, don't start another. 
1745 * Not supposed to be able to happen. 1746 */ 1747 raw_spin_unlock_irq_rcu_node(rnp); 1748 return false; 1749 } 1750 1751 /* Advance to a new grace period and initialize state. */ 1752 record_gp_stall_check_time(); 1753 /* Record GP times before starting GP, hence rcu_seq_start(). */ 1754 rcu_seq_start(&rcu_state.gp_seq); 1755 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 1756 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); 1757 raw_spin_unlock_irq_rcu_node(rnp); 1758 1759 /* 1760 * Apply per-leaf buffered online and offline operations to 1761 * the rcu_node tree. Note that this new grace period need not 1762 * wait for subsequent online CPUs, and that RCU hooks in the CPU 1763 * offlining path, when combined with checks in this function, 1764 * will handle CPUs that are currently going offline or that will 1765 * go offline later. Please also refer to "Hotplug CPU" section 1766 * of RCU's Requirements documentation. 1767 */ 1768 rcu_state.gp_state = RCU_GP_ONOFF; 1769 rcu_for_each_leaf_node(rnp) { 1770 smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values. 1771 firstseq = READ_ONCE(rnp->ofl_seq); 1772 if (firstseq & 0x1) 1773 while (firstseq == READ_ONCE(rnp->ofl_seq)) 1774 schedule_timeout_idle(1); // Can't wake unless RCU is watching. 1775 smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values. 1776 raw_spin_lock(&rcu_state.ofl_lock); 1777 raw_spin_lock_irq_rcu_node(rnp); 1778 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1779 !rnp->wait_blkd_tasks) { 1780 /* Nothing to do on this leaf rcu_node structure. */ 1781 raw_spin_unlock_irq_rcu_node(rnp); 1782 raw_spin_unlock(&rcu_state.ofl_lock); 1783 continue; 1784 } 1785 1786 /* Record old state, apply changes to ->qsmaskinit field. */ 1787 oldmask = rnp->qsmaskinit; 1788 rnp->qsmaskinit = rnp->qsmaskinitnext; 1789 1790 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1791 if (!oldmask != !rnp->qsmaskinit) { 1792 if (!oldmask) { /* First online CPU for rcu_node. */ 1793 if (!rnp->wait_blkd_tasks) /* Ever offline? */ 1794 rcu_init_new_rnp(rnp); 1795 } else if (rcu_preempt_has_tasks(rnp)) { 1796 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1797 } else { /* Last offline CPU and can propagate. */ 1798 rcu_cleanup_dead_rnp(rnp); 1799 } 1800 } 1801 1802 /* 1803 * If all waited-on tasks from prior grace period are 1804 * done, and if all this rcu_node structure's CPUs are 1805 * still offline, propagate up the rcu_node tree and 1806 * clear ->wait_blkd_tasks. Otherwise, if one of this 1807 * rcu_node structure's CPUs has since come back online, 1808 * simply clear ->wait_blkd_tasks. 1809 */ 1810 if (rnp->wait_blkd_tasks && 1811 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1812 rnp->wait_blkd_tasks = false; 1813 if (!rnp->qsmaskinit) 1814 rcu_cleanup_dead_rnp(rnp); 1815 } 1816 1817 raw_spin_unlock_irq_rcu_node(rnp); 1818 raw_spin_unlock(&rcu_state.ofl_lock); 1819 } 1820 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1821 1822 /* 1823 * Set the quiescent-state-needed bits in all the rcu_node 1824 * structures for all currently online CPUs in breadth-first 1825 * order, starting from the root rcu_node structure, relying on the 1826 * layout of the tree within the rcu_state.node[] array. Note that 1827 * other CPUs will access only the leaves of the hierarchy, thus 1828 * seeing that no grace period is in progress, at least until the 1829 * corresponding leaf node has been initialized. 
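 * For example (illustrative geometry only), with the default
 * RCU_FANOUT_LEAF of 16 on a 64-CPU system, rcu_state.node[] holds one
 * root followed by four leaf rcu_node structures, so this breadth-first
 * walk amounts to a simple linear scan of that array.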
1830 * 1831 * The grace period cannot complete until the initialization 1832 * process finishes, because this kthread handles both. 1833 */ 1834 rcu_state.gp_state = RCU_GP_INIT; 1835 rcu_for_each_node_breadth_first(rnp) { 1836 rcu_gp_slow(gp_init_delay); 1837 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1838 rdp = this_cpu_ptr(&rcu_data); 1839 rcu_preempt_check_blocked_tasks(rnp); 1840 rnp->qsmask = rnp->qsmaskinit; 1841 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1842 if (rnp == rdp->mynode) 1843 (void)__note_gp_changes(rnp, rdp); 1844 rcu_preempt_boost_start_gp(rnp); 1845 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1846 rnp->level, rnp->grplo, 1847 rnp->grphi, rnp->qsmask); 1848 /* Quiescent states for tasks on any now-offline CPUs. */ 1849 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1850 rnp->rcu_gp_init_mask = mask; 1851 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1852 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1853 else 1854 raw_spin_unlock_irq_rcu_node(rnp); 1855 cond_resched_tasks_rcu_qs(); 1856 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1857 } 1858 1859 // If strict, make all CPUs aware of new grace period. 1860 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 1861 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 1862 1863 return true; 1864 } 1865 1866 /* 1867 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1868 * time. 1869 */ 1870 static bool rcu_gp_fqs_check_wake(int *gfp) 1871 { 1872 struct rcu_node *rnp = rcu_get_root(); 1873 1874 // If under overload conditions, force an immediate FQS scan. 1875 if (*gfp & RCU_GP_FLAG_OVLD) 1876 return true; 1877 1878 // Someone like call_rcu() requested a force-quiescent-state scan. 1879 *gfp = READ_ONCE(rcu_state.gp_flags); 1880 if (*gfp & RCU_GP_FLAG_FQS) 1881 return true; 1882 1883 // The current grace period has completed. 1884 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1885 return true; 1886 1887 return false; 1888 } 1889 1890 /* 1891 * Do one round of quiescent-state forcing. 1892 */ 1893 static void rcu_gp_fqs(bool first_time) 1894 { 1895 struct rcu_node *rnp = rcu_get_root(); 1896 1897 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1898 rcu_state.n_force_qs++; 1899 if (first_time) { 1900 /* Collect dyntick-idle snapshots. */ 1901 force_qs_rnp(dyntick_save_progress_counter); 1902 } else { 1903 /* Handle dyntick-idle and offline CPUs. */ 1904 force_qs_rnp(rcu_implicit_dynticks_qs); 1905 } 1906 /* Clear flag to prevent immediate re-entry. */ 1907 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 1908 raw_spin_lock_irq_rcu_node(rnp); 1909 WRITE_ONCE(rcu_state.gp_flags, 1910 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); 1911 raw_spin_unlock_irq_rcu_node(rnp); 1912 } 1913 } 1914 1915 /* 1916 * Loop doing repeated quiescent-state forcing until the grace period ends. 1917 */ 1918 static void rcu_gp_fqs_loop(void) 1919 { 1920 bool first_gp_fqs; 1921 int gf = 0; 1922 unsigned long j; 1923 int ret; 1924 struct rcu_node *rnp = rcu_get_root(); 1925 1926 first_gp_fqs = true; 1927 j = READ_ONCE(jiffies_till_first_fqs); 1928 if (rcu_state.cbovld) 1929 gf = RCU_GP_FLAG_OVLD; 1930 ret = 0; 1931 for (;;) { 1932 if (!ret) { 1933 rcu_state.jiffies_force_qs = jiffies + j; 1934 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1935 jiffies + (j ? 
3 * j : 2)); 1936 } 1937 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1938 TPS("fqswait")); 1939 rcu_state.gp_state = RCU_GP_WAIT_FQS; 1940 ret = swait_event_idle_timeout_exclusive( 1941 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); 1942 rcu_gp_torture_wait(); 1943 rcu_state.gp_state = RCU_GP_DOING_FQS; 1944 /* Locking provides needed memory barriers. */ 1945 /* If grace period done, leave loop. */ 1946 if (!READ_ONCE(rnp->qsmask) && 1947 !rcu_preempt_blocked_readers_cgp(rnp)) 1948 break; 1949 /* If time for quiescent-state forcing, do it. */ 1950 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || 1951 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) { 1952 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1953 TPS("fqsstart")); 1954 rcu_gp_fqs(first_gp_fqs); 1955 gf = 0; 1956 if (first_gp_fqs) { 1957 first_gp_fqs = false; 1958 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; 1959 } 1960 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1961 TPS("fqsend")); 1962 cond_resched_tasks_rcu_qs(); 1963 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1964 ret = 0; /* Force full wait till next FQS. */ 1965 j = READ_ONCE(jiffies_till_next_fqs); 1966 } else { 1967 /* Deal with stray signal. */ 1968 cond_resched_tasks_rcu_qs(); 1969 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1970 WARN_ON(signal_pending(current)); 1971 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1972 TPS("fqswaitsig")); 1973 ret = 1; /* Keep old FQS timing. */ 1974 j = jiffies; 1975 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 1976 j = 1; 1977 else 1978 j = rcu_state.jiffies_force_qs - j; 1979 gf = 0; 1980 } 1981 } 1982 } 1983 1984 /* 1985 * Clean up after the old grace period. 1986 */ 1987 static void rcu_gp_cleanup(void) 1988 { 1989 int cpu; 1990 bool needgp = false; 1991 unsigned long gp_duration; 1992 unsigned long new_gp_seq; 1993 bool offloaded; 1994 struct rcu_data *rdp; 1995 struct rcu_node *rnp = rcu_get_root(); 1996 struct swait_queue_head *sq; 1997 1998 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1999 raw_spin_lock_irq_rcu_node(rnp); 2000 rcu_state.gp_end = jiffies; 2001 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 2002 if (gp_duration > rcu_state.gp_max) 2003 rcu_state.gp_max = gp_duration; 2004 2005 /* 2006 * We know the grace period is complete, but to everyone else 2007 * it appears to still be ongoing. But it is also the case 2008 * that to everyone else it looks like there is nothing that 2009 * they can do to advance the grace period. It is therefore 2010 * safe for us to drop the lock in order to mark the grace 2011 * period as completed in all of the rcu_node structures. 2012 */ 2013 raw_spin_unlock_irq_rcu_node(rnp); 2014 2015 /* 2016 * Propagate new ->gp_seq value to rcu_node structures so that 2017 * other CPUs don't have to wait until the start of the next grace 2018 * period to process their callbacks. This also avoids some nasty 2019 * RCU grace-period initialization races by forcing the end of 2020 * the current grace period to be completely recorded in all of 2021 * the rcu_node structures before the beginning of the next grace 2022 * period is recorded in any of the rcu_node structures. 
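 *
 * For example, assuming the usual RCU_SEQ_CTR_SHIFT of 2 (so that the
 * low two bits of ->gp_seq hold the phase), rcu_seq_end() turns 0x9
 * (counter 2, phase 1, grace period in progress) into 0xc (counter 3,
 * phase 0, idle).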
2023 */ 2024 new_gp_seq = rcu_state.gp_seq; 2025 rcu_seq_end(&new_gp_seq); 2026 rcu_for_each_node_breadth_first(rnp) { 2027 raw_spin_lock_irq_rcu_node(rnp); 2028 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 2029 dump_blkd_tasks(rnp, 10); 2030 WARN_ON_ONCE(rnp->qsmask); 2031 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 2032 rdp = this_cpu_ptr(&rcu_data); 2033 if (rnp == rdp->mynode) 2034 needgp = __note_gp_changes(rnp, rdp) || needgp; 2035 /* smp_mb() provided by prior unlock-lock pair. */ 2036 needgp = rcu_future_gp_cleanup(rnp) || needgp; 2037 // Reset overload indication for CPUs no longer overloaded 2038 if (rcu_is_leaf_node(rnp)) 2039 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { 2040 rdp = per_cpu_ptr(&rcu_data, cpu); 2041 check_cb_ovld_locked(rdp, rnp); 2042 } 2043 sq = rcu_nocb_gp_get(rnp); 2044 raw_spin_unlock_irq_rcu_node(rnp); 2045 rcu_nocb_gp_cleanup(sq); 2046 cond_resched_tasks_rcu_qs(); 2047 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2048 rcu_gp_slow(gp_cleanup_delay); 2049 } 2050 rnp = rcu_get_root(); 2051 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 2052 2053 /* Declare grace period done, trace first to use old GP number. */ 2054 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 2055 rcu_seq_end(&rcu_state.gp_seq); 2056 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 2057 rcu_state.gp_state = RCU_GP_IDLE; 2058 /* Check for GP requests since above loop. */ 2059 rdp = this_cpu_ptr(&rcu_data); 2060 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 2061 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 2062 TPS("CleanupMore")); 2063 needgp = true; 2064 } 2065 /* Advance CBs to reduce false positives below. */ 2066 offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); 2067 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { 2068 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 2069 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 2070 trace_rcu_grace_period(rcu_state.name, 2071 rcu_state.gp_seq, 2072 TPS("newreq")); 2073 } else { 2074 WRITE_ONCE(rcu_state.gp_flags, 2075 rcu_state.gp_flags & RCU_GP_FLAG_INIT); 2076 } 2077 raw_spin_unlock_irq_rcu_node(rnp); 2078 2079 // If strict, make all CPUs aware of the end of the old grace period. 2080 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2081 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 2082 } 2083 2084 /* 2085 * Body of kthread that handles grace periods. 2086 */ 2087 static int __noreturn rcu_gp_kthread(void *unused) 2088 { 2089 rcu_bind_gp_kthread(); 2090 for (;;) { 2091 2092 /* Handle grace-period start. */ 2093 for (;;) { 2094 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2095 TPS("reqwait")); 2096 rcu_state.gp_state = RCU_GP_WAIT_GPS; 2097 swait_event_idle_exclusive(rcu_state.gp_wq, 2098 READ_ONCE(rcu_state.gp_flags) & 2099 RCU_GP_FLAG_INIT); 2100 rcu_gp_torture_wait(); 2101 rcu_state.gp_state = RCU_GP_DONE_GPS; 2102 /* Locking provides needed memory barrier. */ 2103 if (rcu_gp_init()) 2104 break; 2105 cond_resched_tasks_rcu_qs(); 2106 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2107 WARN_ON(signal_pending(current)); 2108 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2109 TPS("reqwaitsig")); 2110 } 2111 2112 /* Handle quiescent-state forcing. */ 2113 rcu_gp_fqs_loop(); 2114 2115 /* Handle grace-period end. */ 2116 rcu_state.gp_state = RCU_GP_CLEANUP; 2117 rcu_gp_cleanup(); 2118 rcu_state.gp_state = RCU_GP_CLEANED; 2119 } 2120 } 2121 2122 /* 2123 * Report a full set of quiescent states to the rcu_state data structure. 
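 * This is reached only after the last quiescent state has been reported
 * all the way up to the root rcu_node structure.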
2124 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 2125 * another grace period is required. Whether we wake the grace-period 2126 * kthread or it awakens itself for the next round of quiescent-state 2127 * forcing, that kthread will clean up after the just-completed grace 2128 * period. Note that the caller must hold rnp->lock, which is released 2129 * before return. 2130 */ 2131 static void rcu_report_qs_rsp(unsigned long flags) 2132 __releases(rcu_get_root()->lock) 2133 { 2134 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 2135 WARN_ON_ONCE(!rcu_gp_in_progress()); 2136 WRITE_ONCE(rcu_state.gp_flags, 2137 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2138 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 2139 rcu_gp_kthread_wake(); 2140 } 2141 2142 /* 2143 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2144 * Allows quiescent states for a group of CPUs to be reported at one go 2145 * to the specified rcu_node structure, though all the CPUs in the group 2146 * must be represented by the same rcu_node structure (which need not be a 2147 * leaf rcu_node structure, though it often will be). The gps parameter 2148 * is the grace-period snapshot, which means that the quiescent states 2149 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 2150 * must be held upon entry, and it is released before return. 2151 * 2152 * As a special case, if mask is zero, the bit-already-cleared check is 2153 * disabled. This allows propagating quiescent state due to resumed tasks 2154 * during grace-period initialization. 2155 */ 2156 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 2157 unsigned long gps, unsigned long flags) 2158 __releases(rnp->lock) 2159 { 2160 unsigned long oldmask = 0; 2161 struct rcu_node *rnp_c; 2162 2163 raw_lockdep_assert_held_rcu_node(rnp); 2164 2165 /* Walk up the rcu_node hierarchy. */ 2166 for (;;) { 2167 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 2168 2169 /* 2170 * Our bit has already been cleared, or the 2171 * relevant grace period is already over, so done. 2172 */ 2173 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2174 return; 2175 } 2176 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2177 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 2178 rcu_preempt_blocked_readers_cgp(rnp)); 2179 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 2180 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 2181 mask, rnp->qsmask, rnp->level, 2182 rnp->grplo, rnp->grphi, 2183 !!rnp->gp_tasks); 2184 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2185 2186 /* Other bits still set at this level, so done. */ 2187 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2188 return; 2189 } 2190 rnp->completedqs = rnp->gp_seq; 2191 mask = rnp->grpmask; 2192 if (rnp->parent == NULL) { 2193 2194 /* No more levels. Exit loop holding root lock. */ 2195 2196 break; 2197 } 2198 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2199 rnp_c = rnp; 2200 rnp = rnp->parent; 2201 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2202 oldmask = READ_ONCE(rnp_c->qsmask); 2203 } 2204 2205 /* 2206 * Get here if we are the last CPU to pass through a quiescent 2207 * state for this grace period. Invoke rcu_report_qs_rsp() 2208 * to clean up and start the next grace period if one is needed. 2209 */ 2210 rcu_report_qs_rsp(flags); /* releases rnp->lock. 
*/ 2211 } 2212 2213 /* 2214 * Record a quiescent state for all tasks that were previously queued 2215 * on the specified rcu_node structure and that were blocking the current 2216 * RCU grace period. The caller must hold the corresponding rnp->lock with 2217 * irqs disabled, and this lock is released upon return, but irqs remain 2218 * disabled. 2219 */ 2220 static void __maybe_unused 2221 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 2222 __releases(rnp->lock) 2223 { 2224 unsigned long gps; 2225 unsigned long mask; 2226 struct rcu_node *rnp_p; 2227 2228 raw_lockdep_assert_held_rcu_node(rnp); 2229 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 2230 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 2231 rnp->qsmask != 0) { 2232 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2233 return; /* Still need more quiescent states! */ 2234 } 2235 2236 rnp->completedqs = rnp->gp_seq; 2237 rnp_p = rnp->parent; 2238 if (rnp_p == NULL) { 2239 /* 2240 * Only one rcu_node structure in the tree, so don't 2241 * try to report up to its nonexistent parent! 2242 */ 2243 rcu_report_qs_rsp(flags); 2244 return; 2245 } 2246 2247 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 2248 gps = rnp->gp_seq; 2249 mask = rnp->grpmask; 2250 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2251 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2252 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 2253 } 2254 2255 /* 2256 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2257 * structure. This must be called from the specified CPU. 2258 */ 2259 static void 2260 rcu_report_qs_rdp(struct rcu_data *rdp) 2261 { 2262 unsigned long flags; 2263 unsigned long mask; 2264 bool needwake = false; 2265 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); 2266 struct rcu_node *rnp; 2267 2268 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); 2269 rnp = rdp->mynode; 2270 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2271 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2272 rdp->gpwrap) { 2273 2274 /* 2275 * The grace period in which this quiescent state was 2276 * recorded has ended, so don't report it upwards. 2277 * We will instead need a new quiescent state that lies 2278 * within the current grace period. 2279 */ 2280 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2281 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2282 return; 2283 } 2284 mask = rdp->grpmask; 2285 rdp->core_needs_qs = false; 2286 if ((rnp->qsmask & mask) == 0) { 2287 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2288 } else { 2289 /* 2290 * This GP can't end until cpu checks in, so all of our 2291 * callbacks can be processed during the next GP. 2292 */ 2293 if (!offloaded) 2294 needwake = rcu_accelerate_cbs(rnp, rdp); 2295 2296 rcu_disable_urgency_upon_qs(rdp); 2297 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2298 /* ^^^ Released rnp->lock */ 2299 if (needwake) 2300 rcu_gp_kthread_wake(); 2301 } 2302 } 2303 2304 /* 2305 * Check to see if there is a new grace period of which this CPU 2306 * is not yet aware, and if so, set up local rcu_data state for it. 2307 * Otherwise, see if this CPU has just passed through its first 2308 * quiescent state for this grace period, and record that fact if so. 2309 */ 2310 static void 2311 rcu_check_quiescent_state(struct rcu_data *rdp) 2312 { 2313 /* Check for grace-period ends and beginnings. */ 2314 note_gp_changes(rdp); 2315 2316 /* 2317 * Does this CPU still need to do its part for current grace period? 
2318 * If no, return and let the other CPUs do their part as well. 2319 */ 2320 if (!rdp->core_needs_qs) 2321 return; 2322 2323 /* 2324 * Was there a quiescent state since the beginning of the grace 2325 * period? If no, then exit and wait for the next call. 2326 */ 2327 if (rdp->cpu_no_qs.b.norm) 2328 return; 2329 2330 /* 2331 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2332 * judge of that). 2333 */ 2334 rcu_report_qs_rdp(rdp); 2335 } 2336 2337 /* 2338 * Near the end of the offline process. Trace the fact that this CPU 2339 * is going offline. 2340 */ 2341 int rcutree_dying_cpu(unsigned int cpu) 2342 { 2343 bool blkd; 2344 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 2345 struct rcu_node *rnp = rdp->mynode; 2346 2347 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2348 return 0; 2349 2350 blkd = !!(rnp->qsmask & rdp->grpmask); 2351 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 2352 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 2353 return 0; 2354 } 2355 2356 /* 2357 * All CPUs for the specified rcu_node structure have gone offline, 2358 * and all tasks that were preempted within an RCU read-side critical 2359 * section while running on one of those CPUs have since exited their RCU 2360 * read-side critical section. Some other CPU is reporting this fact with 2361 * the specified rcu_node structure's ->lock held and interrupts disabled. 2362 * This function therefore goes up the tree of rcu_node structures, 2363 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2364 * the leaf rcu_node structure's ->qsmaskinit field has already been 2365 * updated. 2366 * 2367 * This function does check that the specified rcu_node structure has 2368 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2369 * prematurely. That said, invoking it after the fact will cost you 2370 * a needless lock acquisition. So once it has done its work, don't 2371 * invoke it again. 2372 */ 2373 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2374 { 2375 long mask; 2376 struct rcu_node *rnp = rnp_leaf; 2377 2378 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2379 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2380 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 2381 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 2382 return; 2383 for (;;) { 2384 mask = rnp->grpmask; 2385 rnp = rnp->parent; 2386 if (!rnp) 2387 break; 2388 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2389 rnp->qsmaskinit &= ~mask; 2390 /* Between grace periods, so better already be zero! */ 2391 WARN_ON_ONCE(rnp->qsmask); 2392 if (rnp->qsmaskinit) { 2393 raw_spin_unlock_rcu_node(rnp); 2394 /* irqs remain disabled. */ 2395 return; 2396 } 2397 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2398 } 2399 } 2400 2401 /* 2402 * The CPU has been completely removed, and some other CPU is reporting 2403 * this fact from process context. Do the remainder of the cleanup. 2404 * There can only be one CPU hotplug operation at a time, so no need for 2405 * explicit locking. 2406 */ 2407 int rcutree_dead_cpu(unsigned int cpu) 2408 { 2409 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2410 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2411 2412 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2413 return 0; 2414 2415 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); 2416 /* Adjust any no-longer-needed kthreads. */ 2417 rcu_boost_kthread_setaffinity(rnp, -1); 2418 /* Do any needed no-CB deferred wakeups from this CPU. 
*/ 2419 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); 2420 2421 // Stop-machine done, so allow nohz_full to disable tick. 2422 tick_dep_clear(TICK_DEP_BIT_RCU); 2423 return 0; 2424 } 2425 2426 /* 2427 * Invoke any RCU callbacks that have made it to the end of their grace 2428 * period. Throttle as specified by rdp->blimit. 2429 */ 2430 static void rcu_do_batch(struct rcu_data *rdp) 2431 { 2432 int div; 2433 unsigned long flags; 2434 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); 2435 struct rcu_head *rhp; 2436 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2437 long bl, count; 2438 long pending, tlimit = 0; 2439 2440 /* If no callbacks are ready, just return. */ 2441 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2442 trace_rcu_batch_start(rcu_state.name, 2443 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2444 trace_rcu_batch_end(rcu_state.name, 0, 2445 !rcu_segcblist_empty(&rdp->cblist), 2446 need_resched(), is_idle_task(current), 2447 rcu_is_callbacks_kthread()); 2448 return; 2449 } 2450 2451 /* 2452 * Extract the list of ready callbacks, disabling interrupts to prevent 2453 * races with call_rcu() from interrupt handlers. Leave the 2454 * callback counts, as rcu_barrier() needs to be conservative. 2455 */ 2456 local_irq_save(flags); 2457 rcu_nocb_lock(rdp); 2458 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2459 pending = rcu_segcblist_n_cbs(&rdp->cblist); 2460 div = READ_ONCE(rcu_divisor); 2461 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; 2462 bl = max(rdp->blimit, pending >> div); 2463 if (unlikely(bl > 100)) { 2464 long rrn = READ_ONCE(rcu_resched_ns); 2465 2466 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn; 2467 tlimit = local_clock() + rrn; 2468 } 2469 trace_rcu_batch_start(rcu_state.name, 2470 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2471 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2472 if (offloaded) 2473 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2474 rcu_nocb_unlock_irqrestore(rdp, flags); 2475 2476 /* Invoke callbacks. */ 2477 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2478 rhp = rcu_cblist_dequeue(&rcl); 2479 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2480 rcu_callback_t f; 2481 2482 debug_rcu_head_unqueue(rhp); 2483 2484 rcu_lock_acquire(&rcu_callback_map); 2485 trace_rcu_invoke_callback(rcu_state.name, rhp); 2486 2487 f = rhp->func; 2488 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); 2489 f(rhp); 2490 2491 rcu_lock_release(&rcu_callback_map); 2492 2493 /* 2494 * Stop only if limit reached and CPU has something to do. 2495 * Note: The rcl structure counts down from zero. 2496 */ 2497 if (-rcl.len >= bl && !offloaded && 2498 (need_resched() || 2499 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2500 break; 2501 if (unlikely(tlimit)) { 2502 /* only call local_clock() every 32 callbacks */ 2503 if (likely((-rcl.len & 31) || local_clock() < tlimit)) 2504 continue; 2505 /* Exceeded the time limit, so leave.
*/ 2506 break; 2507 } 2508 if (offloaded) { 2509 WARN_ON_ONCE(in_serving_softirq()); 2510 local_bh_enable(); 2511 lockdep_assert_irqs_enabled(); 2512 cond_resched_tasks_rcu_qs(); 2513 lockdep_assert_irqs_enabled(); 2514 local_bh_disable(); 2515 } 2516 } 2517 2518 local_irq_save(flags); 2519 rcu_nocb_lock(rdp); 2520 count = -rcl.len; 2521 rdp->n_cbs_invoked += count; 2522 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2523 is_idle_task(current), rcu_is_callbacks_kthread()); 2524 2525 /* Update counts and requeue any remaining callbacks. */ 2526 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2527 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2528 rcu_segcblist_insert_count(&rdp->cblist, &rcl); 2529 2530 /* Reinstate batch limit if we have worked down the excess. */ 2531 count = rcu_segcblist_n_cbs(&rdp->cblist); 2532 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2533 rdp->blimit = blimit; 2534 2535 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2536 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2537 rdp->qlen_last_fqs_check = 0; 2538 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2539 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2540 rdp->qlen_last_fqs_check = count; 2541 2542 /* 2543 * The following usually indicates a double call_rcu(). To track 2544 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2545 */ 2546 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist)); 2547 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2548 count != 0 && rcu_segcblist_empty(&rdp->cblist)); 2549 2550 rcu_nocb_unlock_irqrestore(rdp, flags); 2551 2552 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2553 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) 2554 invoke_rcu_core(); 2555 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2556 } 2557 2558 /* 2559 * This function is invoked from each scheduling-clock interrupt, 2560 * and checks to see if this CPU is in a non-context-switch quiescent 2561 * state, for example, user mode or idle loop. It also schedules RCU 2562 * core processing. If the current grace period has gone on too long, 2563 * it will ask the scheduler to manufacture a context switch for the sole 2564 * purpose of providing the needed quiescent state. 2565 */ 2566 void rcu_sched_clock_irq(int user) 2567 { 2568 trace_rcu_utilization(TPS("Start scheduler-tick")); 2569 raw_cpu_inc(rcu_data.ticks_this_gp); 2570 /* The load-acquire pairs with the store-release setting to true. */ 2571 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2572 /* Idle and userspace execution already are quiescent states. */ 2573 if (!rcu_is_cpu_rrupt_from_idle() && !user) { 2574 set_tsk_need_resched(current); 2575 set_preempt_need_resched(); 2576 } 2577 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2578 } 2579 rcu_flavor_sched_clock_irq(user); 2580 if (rcu_pending(user)) 2581 invoke_rcu_core(); 2582 2583 trace_rcu_utilization(TPS("End scheduler-tick")); 2584 } 2585 2586 /* 2587 * Scan the leaf rcu_node structures. For each structure on which all 2588 * CPUs have reported a quiescent state and on which there are tasks 2589 * blocking the current grace period, initiate RCU priority boosting. 2590 * Otherwise, invoke the specified function to check dyntick state for 2591 * each CPU that has not yet reported a quiescent state.
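 * The function f is dyntick_save_progress_counter() on a grace period's
 * first FQS pass and rcu_implicit_dynticks_qs() on later passes (see
 * rcu_gp_fqs() above).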
2592 */ 2593 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2594 { 2595 int cpu; 2596 unsigned long flags; 2597 unsigned long mask; 2598 struct rcu_data *rdp; 2599 struct rcu_node *rnp; 2600 2601 rcu_state.cbovld = rcu_state.cbovldnext; 2602 rcu_state.cbovldnext = false; 2603 rcu_for_each_leaf_node(rnp) { 2604 cond_resched_tasks_rcu_qs(); 2605 mask = 0; 2606 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2607 rcu_state.cbovldnext |= !!rnp->cbovldmask; 2608 if (rnp->qsmask == 0) { 2609 if (rcu_preempt_blocked_readers_cgp(rnp)) { 2610 /* 2611 * No point in scanning bits because they 2612 * are all zero. But we might need to 2613 * priority-boost blocked readers. 2614 */ 2615 rcu_initiate_boost(rnp, flags); 2616 /* rcu_initiate_boost() releases rnp->lock */ 2617 continue; 2618 } 2619 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2620 continue; 2621 } 2622 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { 2623 rdp = per_cpu_ptr(&rcu_data, cpu); 2624 if (f(rdp)) { 2625 mask |= rdp->grpmask; 2626 rcu_disable_urgency_upon_qs(rdp); 2627 } 2628 } 2629 if (mask != 0) { 2630 /* Idle/offline CPUs, report (releases rnp->lock). */ 2631 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2632 } else { 2633 /* Nothing to do here, so just drop the lock. */ 2634 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2635 } 2636 } 2637 } 2638 2639 /* 2640 * Force quiescent states on reluctant CPUs, and also detect which 2641 * CPUs are in dyntick-idle mode. 2642 */ 2643 void rcu_force_quiescent_state(void) 2644 { 2645 unsigned long flags; 2646 bool ret; 2647 struct rcu_node *rnp; 2648 struct rcu_node *rnp_old = NULL; 2649 2650 /* Funnel through hierarchy to reduce memory contention. */ 2651 rnp = __this_cpu_read(rcu_data.mynode); 2652 for (; rnp != NULL; rnp = rnp->parent) { 2653 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2654 !raw_spin_trylock(&rnp->fqslock); 2655 if (rnp_old != NULL) 2656 raw_spin_unlock(&rnp_old->fqslock); 2657 if (ret) 2658 return; 2659 rnp_old = rnp; 2660 } 2661 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2662 2663 /* Reached the root of the rcu_node tree, acquire lock. */ 2664 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2665 raw_spin_unlock(&rnp_old->fqslock); 2666 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2667 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2668 return; /* Someone beat us to it. */ 2669 } 2670 WRITE_ONCE(rcu_state.gp_flags, 2671 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2672 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2673 rcu_gp_kthread_wake(); 2674 } 2675 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2676 2677 // Workqueue handler for an RCU reader for kernels enforcing strict RCU 2678 // grace periods. 2679 static void strict_work_handler(struct work_struct *work) 2680 { 2681 rcu_read_lock(); 2682 rcu_read_unlock(); 2683 } 2684 2685 /* Perform RCU core processing work for the current CPU. */ 2686 static __latent_entropy void rcu_core(void) 2687 { 2688 unsigned long flags; 2689 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2690 struct rcu_node *rnp = rdp->mynode; 2691 const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); 2692 2693 if (cpu_is_offline(smp_processor_id())) 2694 return; 2695 trace_rcu_utilization(TPS("Start RCU core")); 2696 WARN_ON_ONCE(!rdp->beenonline); 2697 2698 /* Report any deferred quiescent states if preemption enabled.
*/ 2699 if (!(preempt_count() & PREEMPT_MASK)) { 2700 rcu_preempt_deferred_qs(current); 2701 } else if (rcu_preempt_need_deferred_qs(current)) { 2702 set_tsk_need_resched(current); 2703 set_preempt_need_resched(); 2704 } 2705 2706 /* Update RCU state based on any recent quiescent states. */ 2707 rcu_check_quiescent_state(rdp); 2708 2709 /* No grace period and unregistered callbacks? */ 2710 if (!rcu_gp_in_progress() && 2711 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { 2712 local_irq_save(flags); 2713 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2714 rcu_accelerate_cbs_unlocked(rnp, rdp); 2715 local_irq_restore(flags); 2716 } 2717 2718 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2719 2720 /* If there are callbacks ready, invoke them. */ 2721 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && 2722 likely(READ_ONCE(rcu_scheduler_fully_active))) 2723 rcu_do_batch(rdp); 2724 2725 /* Do any needed deferred wakeups of rcuo kthreads. */ 2726 do_nocb_deferred_wakeup(rdp); 2727 trace_rcu_utilization(TPS("End RCU core")); 2728 2729 // If strict GPs, schedule an RCU reader in a clean environment. 2730 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2731 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); 2732 } 2733 2734 static void rcu_core_si(struct softirq_action *h) 2735 { 2736 rcu_core(); 2737 } 2738 2739 static void rcu_wake_cond(struct task_struct *t, int status) 2740 { 2741 /* 2742 * If the thread is yielding, only wake it when this 2743 * is invoked from idle 2744 */ 2745 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2746 wake_up_process(t); 2747 } 2748 2749 static void invoke_rcu_core_kthread(void) 2750 { 2751 struct task_struct *t; 2752 unsigned long flags; 2753 2754 local_irq_save(flags); 2755 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2756 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2757 if (t != NULL && t != current) 2758 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2759 local_irq_restore(flags); 2760 } 2761 2762 /* 2763 * Wake up this CPU's rcuc kthread to do RCU core processing. 2764 */ 2765 static void invoke_rcu_core(void) 2766 { 2767 if (!cpu_online(smp_processor_id())) 2768 return; 2769 if (use_softirq) 2770 raise_softirq(RCU_SOFTIRQ); 2771 else 2772 invoke_rcu_core_kthread(); 2773 } 2774 2775 static void rcu_cpu_kthread_park(unsigned int cpu) 2776 { 2777 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2778 } 2779 2780 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2781 { 2782 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2783 } 2784 2785 /* 2786 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2787 * the RCU softirq used in configurations of RCU that do not support RCU 2788 * priority boosting. 
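 * These kthreads are spawned when CONFIG_RCU_BOOST=y or when booting
 * with rcutree.use_softirq=0 (see rcu_spawn_core_kthreads() below).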
2789 */ 2790 static void rcu_cpu_kthread(unsigned int cpu) 2791 { 2792 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2793 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2794 int spincnt; 2795 2796 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2797 for (spincnt = 0; spincnt < 10; spincnt++) { 2798 local_bh_disable(); 2799 *statusp = RCU_KTHREAD_RUNNING; 2800 local_irq_disable(); 2801 work = *workp; 2802 *workp = 0; 2803 local_irq_enable(); 2804 if (work) 2805 rcu_core(); 2806 local_bh_enable(); 2807 if (*workp == 0) { 2808 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2809 *statusp = RCU_KTHREAD_WAITING; 2810 return; 2811 } 2812 } 2813 *statusp = RCU_KTHREAD_YIELDING; 2814 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2815 schedule_timeout_idle(2); 2816 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2817 *statusp = RCU_KTHREAD_WAITING; 2818 } 2819 2820 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2821 .store = &rcu_data.rcu_cpu_kthread_task, 2822 .thread_should_run = rcu_cpu_kthread_should_run, 2823 .thread_fn = rcu_cpu_kthread, 2824 .thread_comm = "rcuc/%u", 2825 .setup = rcu_cpu_kthread_setup, 2826 .park = rcu_cpu_kthread_park, 2827 }; 2828 2829 /* 2830 * Spawn per-CPU RCU core processing kthreads. 2831 */ 2832 static int __init rcu_spawn_core_kthreads(void) 2833 { 2834 int cpu; 2835 2836 for_each_possible_cpu(cpu) 2837 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2838 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq) 2839 return 0; 2840 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2841 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2842 return 0; 2843 } 2844 early_initcall(rcu_spawn_core_kthreads); 2845 2846 /* 2847 * Handle any core-RCU processing required by a call_rcu() invocation. 2848 */ 2849 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2850 unsigned long flags) 2851 { 2852 /* 2853 * If called from an extended quiescent state, invoke the RCU 2854 * core in order to force a re-evaluation of RCU's idleness. 2855 */ 2856 if (!rcu_is_watching()) 2857 invoke_rcu_core(); 2858 2859 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2860 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2861 return; 2862 2863 /* 2864 * Force the grace period if too many callbacks or too long waiting. 2865 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 2866 * if some other CPU has recently done so. Also, don't bother 2867 * invoking rcu_force_quiescent_state() if the newly enqueued callback 2868 * is the only one waiting for a grace period to complete. 2869 */ 2870 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2871 rdp->qlen_last_fqs_check + qhimark)) { 2872 2873 /* Are we ignoring a completed grace period? */ 2874 note_gp_changes(rdp); 2875 2876 /* Start a new grace period if one not already started. */ 2877 if (!rcu_gp_in_progress()) { 2878 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 2879 } else { 2880 /* Give the grace period a kick. */ 2881 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 2882 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && 2883 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2884 rcu_force_quiescent_state(); 2885 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2886 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2887 } 2888 } 2889 } 2890 2891 /* 2892 * RCU callback function to leak a callback. 
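 * When __call_rcu() detects a probable double call_rcu(), it redirects
 * the rcu_head's ->func here, so the already-queued instance leaks the
 * memory rather than freeing it twice.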
2893 */ 2894 static void rcu_leak_callback(struct rcu_head *rhp) 2895 { 2896 } 2897 2898 /* 2899 * Check and if necessary update the leaf rcu_node structure's 2900 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2901 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 2902 * structure's ->lock. 2903 */ 2904 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 2905 { 2906 raw_lockdep_assert_held_rcu_node(rnp); 2907 if (qovld_calc <= 0) 2908 return; // Early boot and wildcard value set. 2909 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 2910 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 2911 else 2912 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 2913 } 2914 2915 /* 2916 * Check and if necessary update the leaf rcu_node structure's 2917 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2918 * number of queued RCU callbacks. No locks need be held, but the 2919 * caller must have disabled interrupts. 2920 * 2921 * Note that this function ignores the possibility that there are a lot 2922 * of callbacks all of which have already seen the end of their respective 2923 * grace periods. This omission is due to the need for no-CBs CPUs to 2924 * be holding ->nocb_lock to do this check, which is too heavy for a 2925 * common-case operation. 2926 */ 2927 static void check_cb_ovld(struct rcu_data *rdp) 2928 { 2929 struct rcu_node *const rnp = rdp->mynode; 2930 2931 if (qovld_calc <= 0 || 2932 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 2933 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 2934 return; // Early boot wildcard value or already set correctly. 2935 raw_spin_lock_rcu_node(rnp); 2936 check_cb_ovld_locked(rdp, rnp); 2937 raw_spin_unlock_rcu_node(rnp); 2938 } 2939 2940 /* Helper function for call_rcu() and friends. */ 2941 static void 2942 __call_rcu(struct rcu_head *head, rcu_callback_t func) 2943 { 2944 unsigned long flags; 2945 struct rcu_data *rdp; 2946 bool was_alldone; 2947 2948 /* Misaligned rcu_head! */ 2949 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 2950 2951 if (debug_rcu_head_queue(head)) { 2952 /* 2953 * Probable double call_rcu(), so leak the callback. 2954 * Use rcu:rcu_callback trace event to find the previous 2955 * time callback was passed to __call_rcu(). 2956 */ 2957 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", 2958 head, head->func); 2959 WRITE_ONCE(head->func, rcu_leak_callback); 2960 return; 2961 } 2962 head->func = func; 2963 head->next = NULL; 2964 local_irq_save(flags); 2965 kasan_record_aux_stack(head); 2966 rdp = this_cpu_ptr(&rcu_data); 2967 2968 /* Add the callback to our list. */ 2969 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 2970 // This can trigger due to call_rcu() from offline CPU: 2971 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 2972 WARN_ON_ONCE(!rcu_is_watching()); 2973 // Very early boot, before rcu_init(). Initialize if needed 2974 // and then drop through to queue the callback. 2975 if (rcu_segcblist_empty(&rdp->cblist)) 2976 rcu_segcblist_init(&rdp->cblist); 2977 } 2978 2979 check_cb_ovld(rdp); 2980 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) 2981 return; // Enqueued onto ->nocb_bypass, so just leave. 2982 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. 
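// The bypass was not used, so enqueue onto this CPU's segmented callback list.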
2983 rcu_segcblist_enqueue(&rdp->cblist, head); 2984 if (__is_kvfree_rcu_offset((unsigned long)func)) 2985 trace_rcu_kvfree_callback(rcu_state.name, head, 2986 (unsigned long)func, 2987 rcu_segcblist_n_cbs(&rdp->cblist)); 2988 else 2989 trace_rcu_callback(rcu_state.name, head, 2990 rcu_segcblist_n_cbs(&rdp->cblist)); 2991 2992 /* Go handle any RCU core processing required. */ 2993 if (unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { 2994 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ 2995 } else { 2996 __call_rcu_core(rdp, head, flags); 2997 local_irq_restore(flags); 2998 } 2999 } 3000 3001 /** 3002 * call_rcu() - Queue an RCU callback for invocation after a grace period. 3003 * @head: structure to be used for queueing the RCU updates. 3004 * @func: actual callback function to be invoked after the grace period 3005 * 3006 * The callback function will be invoked some time after a full grace 3007 * period elapses, in other words after all pre-existing RCU read-side 3008 * critical sections have completed. However, the callback function 3009 * might well execute concurrently with RCU read-side critical sections 3010 * that started after call_rcu() was invoked. RCU read-side critical 3011 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and 3012 * may be nested. In addition, regions of code across which interrupts, 3013 * preemption, or softirqs have been disabled also serve as RCU read-side 3014 * critical sections. This includes hardware interrupt handlers, softirq 3015 * handlers, and NMI handlers. 3016 * 3017 * Note that all CPUs must agree that the grace period extended beyond 3018 * all pre-existing RCU read-side critical sections. On systems with more 3019 * than one CPU, this means that when "func()" is invoked, each CPU is 3020 * guaranteed to have executed a full memory barrier since the end of its 3021 * last RCU read-side critical section whose beginning preceded the call 3022 * to call_rcu(). It also means that each CPU executing an RCU read-side 3023 * critical section that continues beyond the start of "func()" must have 3024 * executed a memory barrier after the call_rcu() but before the beginning 3025 * of that RCU read-side critical section. Note that these guarantees 3026 * include CPUs that are offline, idle, or executing in user mode, as 3027 * well as CPUs that are executing in the kernel. 3028 * 3029 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 3030 * resulting RCU callback function "func()", then both CPU A and CPU B are 3031 * guaranteed to execute a full memory barrier during the time interval 3032 * between the call to call_rcu() and the invocation of "func()" -- even 3033 * if CPU A and CPU B are the same CPU (but again only if the system has 3034 * more than one CPU). 3035 */ 3036 void call_rcu(struct rcu_head *head, rcu_callback_t func) 3037 { 3038 __call_rcu(head, func); 3039 } 3040 EXPORT_SYMBOL_GPL(call_rcu); 3041 3042 3043 /* Maximum number of jiffies to wait before draining a batch.
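 * (HZ / 50 works out to 20 milliseconds regardless of the HZ setting.)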
*/ 3044 #define KFREE_DRAIN_JIFFIES (HZ / 50) 3045 #define KFREE_N_BATCHES 2 3046 #define FREE_N_CHANNELS 2 3047 3048 /** 3049 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers 3050 * @nr_records: Number of active pointers in the array 3051 * @next: Next bulk object in the block chain 3052 * @records: Array of the kvfree_rcu() pointers 3053 */ 3054 struct kvfree_rcu_bulk_data { 3055 unsigned long nr_records; 3056 struct kvfree_rcu_bulk_data *next; 3057 void *records[]; 3058 }; 3059 3060 /* 3061 * This macro defines how many entries the "records" array 3062 * will contain. It is sized so that a 3063 * kvfree_rcu_bulk_data structure occupies exactly one page. 3064 */ 3065 #define KVFREE_BULK_MAX_ENTR \ 3066 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *)) 3067 3068 /** 3069 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests 3070 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period 3071 * @head_free: List of kfree_rcu() objects waiting for a grace period 3072 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period 3073 * @krcp: Pointer to @kfree_rcu_cpu structure 3074 */ 3075 3076 struct kfree_rcu_cpu_work { 3077 struct rcu_work rcu_work; 3078 struct rcu_head *head_free; 3079 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS]; 3080 struct kfree_rcu_cpu *krcp; 3081 }; 3082 3083 /** 3084 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period 3085 * @head: List of kfree_rcu() objects not yet waiting for a grace period 3086 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period 3087 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period 3088 * @lock: Synchronize access to this structure 3089 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES 3090 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending 3091 * @initialized: The @rcu_work fields have been initialized 3092 * @count: Number of objects for which GP not started 3093 * @bkvcache: 3094 * A simple cache list that contains objects for reuse. 3095 * To save some per-CPU space the list is singly linked. 3096 * Even though it is lockless, an access must be protected by the 3097 * per-CPU lock. 3098 * @page_cache_work: A work to refill the cache when it is empty 3099 * @work_in_progress: Indicates that page_cache_work is running 3100 * @hrtimer: A hrtimer for scheduling a page_cache_work 3101 * @nr_bkv_objs: number of allocated objects in @bkvcache. 3102 * 3103 * This is a per-CPU structure. The reason that it is not included in 3104 * the rcu_data structure is to permit this code to be extracted from 3105 * the RCU files. Such extraction could allow further optimization of 3106 * the interactions with the slab allocators.
3107 */ 3108 struct kfree_rcu_cpu { 3109 struct rcu_head *head; 3110 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS]; 3111 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES]; 3112 raw_spinlock_t lock; 3113 struct delayed_work monitor_work; 3114 bool monitor_todo; 3115 bool initialized; 3116 int count; 3117 3118 struct work_struct page_cache_work; 3119 atomic_t work_in_progress; 3120 struct hrtimer hrtimer; 3121 3122 struct llist_head bkvcache; 3123 int nr_bkv_objs; 3124 }; 3125 3126 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = { 3127 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock), 3128 }; 3129 3130 static __always_inline void 3131 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead) 3132 { 3133 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3134 int i; 3135 3136 for (i = 0; i < bhead->nr_records; i++) 3137 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i])); 3138 #endif 3139 } 3140 3141 static inline struct kfree_rcu_cpu * 3142 krc_this_cpu_lock(unsigned long *flags) 3143 { 3144 struct kfree_rcu_cpu *krcp; 3145 3146 local_irq_save(*flags); // For safely calling this_cpu_ptr(). 3147 krcp = this_cpu_ptr(&krc); 3148 raw_spin_lock(&krcp->lock); 3149 3150 return krcp; 3151 } 3152 3153 static inline void 3154 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) 3155 { 3156 raw_spin_unlock(&krcp->lock); 3157 local_irq_restore(flags); 3158 } 3159 3160 static inline struct kvfree_rcu_bulk_data * 3161 get_cached_bnode(struct kfree_rcu_cpu *krcp) 3162 { 3163 if (!krcp->nr_bkv_objs) 3164 return NULL; 3165 3166 krcp->nr_bkv_objs--; 3167 return (struct kvfree_rcu_bulk_data *) 3168 llist_del_first(&krcp->bkvcache); 3169 } 3170 3171 static inline bool 3172 put_cached_bnode(struct kfree_rcu_cpu *krcp, 3173 struct kvfree_rcu_bulk_data *bnode) 3174 { 3175 // Check the limit. 3176 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) 3177 return false; 3178 3179 llist_add((struct llist_node *) bnode, &krcp->bkvcache); 3180 krcp->nr_bkv_objs++; 3181 return true; 3182 3183 } 3184 3185 /* 3186 * This function is invoked in workqueue context after a grace period. 3187 * It frees all the objects queued on ->bkvhead_free or ->head_free. 3188 */ 3189 static void kfree_rcu_work(struct work_struct *work) 3190 { 3191 unsigned long flags; 3192 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext; 3193 struct rcu_head *head, *next; 3194 struct kfree_rcu_cpu *krcp; 3195 struct kfree_rcu_cpu_work *krwp; 3196 int i, j; 3197 3198 krwp = container_of(to_rcu_work(work), 3199 struct kfree_rcu_cpu_work, rcu_work); 3200 krcp = krwp->krcp; 3201 3202 raw_spin_lock_irqsave(&krcp->lock, flags); 3203 // Channels 1 and 2. 3204 for (i = 0; i < FREE_N_CHANNELS; i++) { 3205 bkvhead[i] = krwp->bkvhead_free[i]; 3206 krwp->bkvhead_free[i] = NULL; 3207 } 3208 3209 // Channel 3. 3210 head = krwp->head_free; 3211 krwp->head_free = NULL; 3212 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3213 3214 // Handle the first two channels. 3215 for (i = 0; i < FREE_N_CHANNELS; i++) { 3216 for (; bkvhead[i]; bkvhead[i] = bnext) { 3217 bnext = bkvhead[i]->next; 3218 debug_rcu_bhead_unqueue(bkvhead[i]); 3219 3220 rcu_lock_acquire(&rcu_callback_map); 3221 if (i == 0) { // kmalloc() / kfree(). 3222 trace_rcu_invoke_kfree_bulk_callback( 3223 rcu_state.name, bkvhead[i]->nr_records, 3224 bkvhead[i]->records); 3225 3226 kfree_bulk(bkvhead[i]->nr_records, 3227 bkvhead[i]->records); 3228 } else { // vmalloc() / vfree().
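// There is no bulk interface for vfree(), so free each pointer one by one.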
				for (j = 0; j < bkvhead[i]->nr_records; j++) {
					trace_rcu_invoke_kvfree_callback(
						rcu_state.name,
						bkvhead[i]->records[j], 0);

					vfree(bkvhead[i]->records[j]);
				}
			}
			rcu_lock_release(&rcu_callback_map);

			raw_spin_lock_irqsave(&krcp->lock, flags);
			if (put_cached_bnode(krcp, bkvhead[i]))
				bkvhead[i] = NULL;
			raw_spin_unlock_irqrestore(&krcp->lock, flags);

			if (bkvhead[i])
				free_page((unsigned long) bkvhead[i]);

			cond_resched_tasks_rcu_qs();
		}
	}

	/*
	 * This is the emergency case only. It can happen under
	 * low-memory conditions, when an allocation has failed and
	 * the "bulk" path therefore could not be maintained for the
	 * time being.
	 */
	for (; head; head = next) {
		unsigned long offset = (unsigned long)head->func;
		void *ptr = (void *)head - offset;

		next = head->next;
		debug_rcu_head_unqueue((struct rcu_head *)ptr);
		rcu_lock_acquire(&rcu_callback_map);
		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);

		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
			kvfree(ptr);

		rcu_lock_release(&rcu_callback_map);
		cond_resched_tasks_rcu_qs();
	}
}

/*
 * Schedule the kfree batch RCU work to run in workqueue context after a GP.
 *
 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES
 * timeout has been reached.
 */
static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp)
{
	struct kfree_rcu_cpu_work *krwp;
	bool repeat = false;
	int i, j;

	lockdep_assert_held(&krcp->lock);

	for (i = 0; i < KFREE_N_BATCHES; i++) {
		krwp = &(krcp->krw_arr[i]);

		/*
		 * Try to detach the bkvhead and head lists and attach
		 * them to any corresponding free channel that is
		 * available. If a previous RCU batch is still in
		 * progress, another one cannot be queued immediately,
		 * so return false to tell the caller to retry.
		 */
		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
		    (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
		    (krcp->head && !krwp->head_free)) {
			// Channel 1 corresponds to SLAB ptrs.
			// Channel 2 corresponds to vmalloc ptrs.
			for (j = 0; j < FREE_N_CHANNELS; j++) {
				if (!krwp->bkvhead_free[j]) {
					krwp->bkvhead_free[j] = krcp->bkvhead[j];
					krcp->bkvhead[j] = NULL;
				}
			}

			// Channel 3 corresponds to the emergency path.
			if (!krwp->head_free) {
				krwp->head_free = krcp->head;
				krcp->head = NULL;
			}

			WRITE_ONCE(krcp->count, 0);

			/*
			 * There is one rcu_work per batch, and each such
			 * work can handle all three "free channels". The
			 * work might already be pending if the channels
			 * were detached one after the other.
			 */
			queue_rcu_work(system_wq, &krwp->rcu_work);
		}

		// Repeat if any corresponding "free" channel is still busy.
		if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head)
			repeat = true;
	}

	return !repeat;
}

static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
					  unsigned long flags)
{
	// Attempt to start a new batch.
	krcp->monitor_todo = false;
	if (queue_kfree_rcu_work(krcp)) {
		// Success! Our job is done here.
		raw_spin_unlock_irqrestore(&krcp->lock, flags);
		return;
	}

	// Previous RCU batch still in progress, try again later.
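	// Leaving the request pending keeps the delayed work rearmed
	// until one of the krw_arr batches becomes available again.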
	krcp->monitor_todo = true;
	schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
	raw_spin_unlock_irqrestore(&krcp->lock, flags);
}

/*
 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
 */
static void kfree_rcu_monitor(struct work_struct *work)
{
	unsigned long flags;
	struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
						  monitor_work.work);

	raw_spin_lock_irqsave(&krcp->lock, flags);
	if (krcp->monitor_todo)
		kfree_rcu_drain_unlock(krcp, flags);
	else
		raw_spin_unlock_irqrestore(&krcp->lock, flags);
}

static enum hrtimer_restart
schedule_page_work_fn(struct hrtimer *t)
{
	struct kfree_rcu_cpu *krcp =
		container_of(t, struct kfree_rcu_cpu, hrtimer);

	queue_work(system_highpri_wq, &krcp->page_cache_work);
	return HRTIMER_NORESTART;
}

static void fill_page_cache_func(struct work_struct *work)
{
	struct kvfree_rcu_bulk_data *bnode;
	struct kfree_rcu_cpu *krcp =
		container_of(work, struct kfree_rcu_cpu,
			     page_cache_work);
	unsigned long flags;
	bool pushed;
	int i;

	for (i = 0; i < rcu_min_cached_objs; i++) {
		bnode = (struct kvfree_rcu_bulk_data *)
			__get_free_page(GFP_KERNEL | __GFP_NOWARN);

		if (bnode) {
			raw_spin_lock_irqsave(&krcp->lock, flags);
			pushed = put_cached_bnode(krcp, bnode);
			raw_spin_unlock_irqrestore(&krcp->lock, flags);

			if (!pushed) {
				free_page((unsigned long) bnode);
				break;
			}
		}
	}

	atomic_set(&krcp->work_in_progress, 0);
}

static void
run_page_cache_worker(struct kfree_rcu_cpu *krcp)
{
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
	    !atomic_xchg(&krcp->work_in_progress, 1)) {
		hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		krcp->hrtimer.function = schedule_page_work_fn;
		hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
	}
}

static inline bool
kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
{
	struct kvfree_rcu_bulk_data *bnode;
	int idx;

	if (unlikely(!krcp->initialized))
		return false;

	lockdep_assert_held(&krcp->lock);
	idx = !!is_vmalloc_addr(ptr);

	/* Check if a new block is required. */
	if (!krcp->bkvhead[idx] ||
	    krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
		bnode = get_cached_bnode(krcp);
		/* Switch to emergency path. */
		if (!bnode)
			return false;

		/* Initialize the new block. */
		bnode->nr_records = 0;
		bnode->next = krcp->bkvhead[idx];

		/* Attach it to the head. */
		krcp->bkvhead[idx] = bnode;
	}

	/* Finally insert. */
	krcp->bkvhead[idx]->records
		[krcp->bkvhead[idx]->nr_records++] = ptr;

	return true;
}

/*
 * Queue a request for lazy invocation of the appropriate free routine
 * after a grace period. Please note that three paths are maintained:
 * the two main ones use the array-of-pointers interface, and the third
 * is an emergency path used only when one of the main paths cannot be
 * maintained for the time being, due to memory pressure.
 *
 * Each kvfree_call_rcu() request is added to a batch. The batch is
 * drained every KFREE_DRAIN_JIFFIES jiffies.
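 *
 * Illustrative usage sketch (the structure and variable names here are
 * made up for the example): a caller typically embeds an rcu_head in
 * its data and invokes the kvfree_rcu() wrapper, which computes the
 * offset that arrives here as the "func" argument:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rh;
 *	};
 *
 *	kvfree_rcu(fp, rh);	// Frees "fp" after a grace period,
 *				// without blocking.
 *
 *	kvfree_rcu(buf);	// Head-less variant; may block, so it
 *				// is valid in might_sleep() context only.
 *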
 * All the objects in the batch are freed in workqueue context. Batching
 * requests this way reduces the number of grace periods needed during
 * heavy kfree_rcu()/kvfree_rcu() load.
 */
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;
	struct kfree_rcu_cpu *krcp;
	bool success;
	void *ptr;

	if (head) {
		ptr = (void *) head - (unsigned long) func;
	} else {
		/*
		 * Please note there is a limitation for the head-less
		 * variant, which is why there is a clear rule for such
		 * objects: they may be used from might_sleep() context
		 * only. For other places please embed an rcu_head in
		 * your data.
		 */
		might_sleep();
		ptr = (unsigned long *) func;
	}

	krcp = krc_this_cpu_lock(&flags);

	// Queue the object but don't yet schedule the batch.
	if (debug_rcu_head_queue(ptr)) {
		// Probable double kfree_rcu(), just leak.
		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
			  __func__, head);

		// Mark as success and leave.
		success = true;
		goto unlock_return;
	}

	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
	if (!success) {
		run_page_cache_worker(krcp);

		if (head == NULL)
			// Inline if kvfree_rcu(one_arg) call.
			goto unlock_return;

		head->func = func;
		head->next = krcp->head;
		krcp->head = head;
		success = true;
	}

	WRITE_ONCE(krcp->count, krcp->count + 1);

	// Set timer to drain after KFREE_DRAIN_JIFFIES.
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
	    !krcp->monitor_todo) {
		krcp->monitor_todo = true;
		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
	}

unlock_return:
	krc_this_cpu_unlock(krcp, flags);

	/*
	 * Inline kvfree() after synchronize_rcu(). We can do
	 * it from might_sleep() context only, so the current
	 * CPU can pass the QS state.
	 */
	if (!success) {
		debug_rcu_head_unqueue((struct rcu_head *) ptr);
		synchronize_rcu();
		kvfree(ptr);
	}
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);

static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long count = 0;

	/* Snapshot count of all CPUs */
	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		count += READ_ONCE(krcp->count);
	}

	return count;
}

static unsigned long
kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu, freed = 0;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		int count;
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		count = krcp->count;
		raw_spin_lock_irqsave(&krcp->lock, flags);
		if (krcp->monitor_todo)
			kfree_rcu_drain_unlock(krcp, flags);
		else
			raw_spin_unlock_irqrestore(&krcp->lock, flags);

		sc->nr_to_scan -= count;
		freed += count;

		if (sc->nr_to_scan <= 0)
			break;
	}

	return freed == 0 ? SHRINK_STOP : freed;
}
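/*
 * The shrinker below gives the MM subsystem a way to drain the per-CPU
 * batches early under memory pressure: ->count_objects reports how many
 * queued objects are still waiting for a grace period, and
 * ->scan_objects kicks the monitor so that their pages are handed back
 * to the system sooner.
 */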
static struct shrinker kfree_rcu_shrinker = {
	.count_objects = kfree_rcu_shrink_count,
	.scan_objects = kfree_rcu_shrink_scan,
	.batch = 0,
	.seeks = DEFAULT_SEEKS,
};

void __init kfree_rcu_scheduler_running(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

		raw_spin_lock_irqsave(&krcp->lock, flags);
		if (!krcp->head || krcp->monitor_todo) {
			raw_spin_unlock_irqrestore(&krcp->lock, flags);
			continue;
		}
		krcp->monitor_todo = true;
		schedule_delayed_work_on(cpu, &krcp->monitor_work,
					 KFREE_DRAIN_JIFFIES);
		raw_spin_unlock_irqrestore(&krcp->lock, flags);
	}
}

/*
 * During early boot, any blocking grace-period wait automatically
 * implies a grace period. Later on, this is never the case for PREEMPTION.
 *
 * However, because a context switch is a grace period for !PREEMPTION, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online at any point in time during the execution
 * of either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds some
 * overhead: RCU still operates correctly.
 */
static int rcu_blocking_is_gp(void)
{
	int ret;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
	might_sleep();	/* Check for RCU read-side critical section. */
	preempt_disable();
	/*
	 * If the rcu_state.n_online_cpus counter is equal to one,
	 * there is only one CPU, and that CPU sees all prior accesses
	 * made by any CPU that was online at the time of its access.
	 * Furthermore, if this counter is equal to one, its value cannot
	 * change until after the preempt_enable() below.
	 *
	 * Furthermore, if rcu_state.n_online_cpus is equal to one here,
	 * all later CPUs (both this one and any that come online later
	 * on) are guaranteed to see all accesses prior to this point
	 * in the code, without the need for additional memory barriers.
	 * Those memory barriers are provided by CPU-hotplug code.
	 */
	ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
	preempt_enable();
	return ret;
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting. RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 * In addition, regions of code across which interrupts, preemption, or
 * softirqs have been disabled also serve as RCU read-side critical
 * sections. This includes hardware interrupt handlers, softirq handlers,
 * and NMI handlers.
 *
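 * A classic updater pattern (illustrative sketch, with made-up "foo"
 * names) unlinks an element from an RCU-protected list, waits for a
 * grace period, and only then frees the element:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();	// Wait for pre-existing readers to finish.
 *	kfree(p);
 *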
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last RCU read-side critical section whose beginning
 * preceded the call to synchronize_rcu(). In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_rcu() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_rcu() and before the beginning of
 * that RCU read-side critical section. Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (rcu_blocking_is_gp())
		return; // Context allows vacuous grace periods.
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_rcu(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gp_seq.
	 */
	smp_mb();  /* ^^^ */
	return rcu_seq_snap(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return. Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account. But
 * counter wrap is harmless. If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_rcu(unsigned long oldstate)
{
	if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
		synchronize_rcu();
	else
		smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);

/*
 * Check to see if there is any immediate RCU-related work to be done by
 * the current CPU, returning 1 if so and zero otherwise. The checks are
 * in order of increasing expense: checks that can be carried out against
 * CPU-local state are performed first. However, we must check for CPU
 * stalls first, else we might not get a chance.
3743 */ 3744 static int rcu_pending(int user) 3745 { 3746 bool gp_in_progress; 3747 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 3748 struct rcu_node *rnp = rdp->mynode; 3749 3750 /* Check for CPU stalls, if enabled. */ 3751 check_cpu_stall(rdp); 3752 3753 /* Does this CPU need a deferred NOCB wakeup? */ 3754 if (rcu_nocb_need_deferred_wakeup(rdp)) 3755 return 1; 3756 3757 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 3758 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu()) 3759 return 0; 3760 3761 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3762 gp_in_progress = rcu_gp_in_progress(); 3763 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 3764 return 1; 3765 3766 /* Does this CPU have callbacks ready to invoke? */ 3767 if (!rcu_segcblist_is_offloaded(&rdp->cblist) && 3768 rcu_segcblist_ready_cbs(&rdp->cblist)) 3769 return 1; 3770 3771 /* Has RCU gone idle with this CPU needing another grace period? */ 3772 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 3773 !rcu_segcblist_is_offloaded(&rdp->cblist) && 3774 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3775 return 1; 3776 3777 /* Have RCU grace period completed or started? */ 3778 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 3779 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3780 return 1; 3781 3782 /* nothing to do */ 3783 return 0; 3784 } 3785 3786 /* 3787 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3788 * the compiler is expected to optimize this away. 3789 */ 3790 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 3791 { 3792 trace_rcu_barrier(rcu_state.name, s, cpu, 3793 atomic_read(&rcu_state.barrier_cpu_count), done); 3794 } 3795 3796 /* 3797 * RCU callback function for rcu_barrier(). If we are last, wake 3798 * up the task executing rcu_barrier(). 3799 * 3800 * Note that the value of rcu_state.barrier_sequence must be captured 3801 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 3802 * other CPUs might count the value down to zero before this CPU gets 3803 * around to invoking rcu_barrier_trace(), which might result in bogus 3804 * data from the next instance of rcu_barrier(). 3805 */ 3806 static void rcu_barrier_callback(struct rcu_head *rhp) 3807 { 3808 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 3809 3810 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 3811 rcu_barrier_trace(TPS("LastCB"), -1, s); 3812 complete(&rcu_state.barrier_completion); 3813 } else { 3814 rcu_barrier_trace(TPS("CB"), -1, s); 3815 } 3816 } 3817 3818 /* 3819 * Called with preemption disabled, and from cross-cpu IRQ context. 3820 */ 3821 static void rcu_barrier_func(void *cpu_in) 3822 { 3823 uintptr_t cpu = (uintptr_t)cpu_in; 3824 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3825 3826 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 3827 rdp->barrier_head.func = rcu_barrier_callback; 3828 debug_rcu_head_queue(&rdp->barrier_head); 3829 rcu_nocb_lock(rdp); 3830 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); 3831 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 3832 atomic_inc(&rcu_state.barrier_cpu_count); 3833 } else { 3834 debug_rcu_head_unqueue(&rdp->barrier_head); 3835 rcu_barrier_trace(TPS("IRQNQ"), -1, 3836 rcu_state.barrier_sequence); 3837 } 3838 rcu_nocb_unlock(rdp); 3839 } 3840 3841 /** 3842 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 
3843 * 3844 * Note that this primitive does not necessarily wait for an RCU grace period 3845 * to complete. For example, if there are no RCU callbacks queued anywhere 3846 * in the system, then rcu_barrier() is within its rights to return 3847 * immediately, without waiting for anything, much less an RCU grace period. 3848 */ 3849 void rcu_barrier(void) 3850 { 3851 uintptr_t cpu; 3852 struct rcu_data *rdp; 3853 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3854 3855 rcu_barrier_trace(TPS("Begin"), -1, s); 3856 3857 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3858 mutex_lock(&rcu_state.barrier_mutex); 3859 3860 /* Did someone else do our work for us? */ 3861 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 3862 rcu_barrier_trace(TPS("EarlyExit"), -1, 3863 rcu_state.barrier_sequence); 3864 smp_mb(); /* caller's subsequent code after above check. */ 3865 mutex_unlock(&rcu_state.barrier_mutex); 3866 return; 3867 } 3868 3869 /* Mark the start of the barrier operation. */ 3870 rcu_seq_start(&rcu_state.barrier_sequence); 3871 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 3872 3873 /* 3874 * Initialize the count to two rather than to zero in order 3875 * to avoid a too-soon return to zero in case of an immediate 3876 * invocation of the just-enqueued callback (or preemption of 3877 * this task). Exclude CPU-hotplug operations to ensure that no 3878 * offline non-offloaded CPU has callbacks queued. 3879 */ 3880 init_completion(&rcu_state.barrier_completion); 3881 atomic_set(&rcu_state.barrier_cpu_count, 2); 3882 get_online_cpus(); 3883 3884 /* 3885 * Force each CPU with callbacks to register a new callback. 3886 * When that callback is invoked, we will know that all of the 3887 * corresponding CPU's preceding callbacks have been invoked. 3888 */ 3889 for_each_possible_cpu(cpu) { 3890 rdp = per_cpu_ptr(&rcu_data, cpu); 3891 if (cpu_is_offline(cpu) && 3892 !rcu_segcblist_is_offloaded(&rdp->cblist)) 3893 continue; 3894 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) { 3895 rcu_barrier_trace(TPS("OnlineQ"), cpu, 3896 rcu_state.barrier_sequence); 3897 smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1); 3898 } else if (rcu_segcblist_n_cbs(&rdp->cblist) && 3899 cpu_is_offline(cpu)) { 3900 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, 3901 rcu_state.barrier_sequence); 3902 local_irq_disable(); 3903 rcu_barrier_func((void *)cpu); 3904 local_irq_enable(); 3905 } else if (cpu_is_offline(cpu)) { 3906 rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu, 3907 rcu_state.barrier_sequence); 3908 } else { 3909 rcu_barrier_trace(TPS("OnlineNQ"), cpu, 3910 rcu_state.barrier_sequence); 3911 } 3912 } 3913 put_online_cpus(); 3914 3915 /* 3916 * Now that we have an rcu_barrier_callback() callback on each 3917 * CPU, and thus each counted, remove the initial count. 3918 */ 3919 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) 3920 complete(&rcu_state.barrier_completion); 3921 3922 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 3923 wait_for_completion(&rcu_state.barrier_completion); 3924 3925 /* Mark the end of the barrier operation. */ 3926 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 3927 rcu_seq_end(&rcu_state.barrier_sequence); 3928 3929 /* Other rcu_barrier() invocations can now safely proceed. 
 */
	mutex_unlock(&rcu_state.barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online. The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	long oldmask;
	struct rcu_node *rnp = rnp_leaf;

	raw_lockdep_assert_held_rcu_node(rnp_leaf);
	WARN_ON_ONCE(rnp->wait_blkd_tasks);
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit |= mask;
		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
		if (oldmask)
			return;
	}
}

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	/* Set up local state, ensuring consistent view of global state. */
	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
	INIT_WORK(&rdp->strict_work, strict_work_handler);
	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
	rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
	rdp->cpu = cpu;
	rcu_boot_init_nocb_percpu_data(rdp);
}

/*
 * Invoked early in the CPU-online process, when pretty much all services
 * are available. The incoming CPU is not present.
 *
 * Initializes a CPU's per-CPU RCU data. Note that only one online or
 * offline event can be happening at a given time. Note also that we can
 * accept some slop in the rsp->gp_seq access due to the fact that this
 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
 * And any offloaded callbacks are being numbered elsewhere.
 */
int rcutree_prepare_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rcu_get_root();

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rcu_state.n_force_qs;
	rdp->blimit = blimit;
	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
	    !rcu_segcblist_is_offloaded(&rdp->cblist))
		rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
	rcu_dynticks_eqs_online();
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */

	/*
	 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	rdp->beenonline = true;	/* We have now been online.
 */
	rdp->gp_seq = READ_ONCE(rnp->gp_seq);
	rdp->gp_seq_needed = rdp->gp_seq;
	rdp->cpu_no_qs.b.norm = true;
	rdp->core_needs_qs = false;
	rdp->rcu_iw_pending = false;
	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_prepare_kthreads(cpu);
	rcu_spawn_cpu_nocb_kthread(cpu);
	WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);

	return 0;
}

/*
 * Update RCU priority boost kthread affinity for CPU-hotplug changes.
 */
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}

/*
 * Near the end of the CPU-online process. Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
int rcutree_online_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask |= rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return 0; /* Too early in boot for scheduler work. */
	sync_sched_exp_online_cleanup(cpu);
	rcutree_affinity_setting(cpu, -1);

	// Stop-machine done, so allow nohz_full to disable tick.
	tick_dep_clear(TICK_DEP_BIT_RCU);
	return 0;
}

/*
 * Near the beginning of the CPU-offline process. The CPU is still very
 * much alive with pretty much all services enabled.
 */
int rcutree_offline_cpu(unsigned int cpu)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->ffmask &= ~rdp->grpmask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	rcutree_affinity_setting(cpu, cpu);

	// nohz_full CPUs need the tick for stop-machine to work quickly
	tick_dep_set(TICK_DEP_BIT_RCU);
	return 0;
}

/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it. Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called. Failing to observe this restriction
 * will result in lockdep splats.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_cpu_starting(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool newcpu;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	if (rdp->cpu_started)
		return;
	rdp->cpu_started = true;

	rnp = rdp->mynode;
	mask = rdp->grpmask;
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
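	/*
	 * ->ofl_seq follows a seqcount-like protocol: it was just made
	 * odd to flag that this leaf's masks are being updated, and it
	 * is made even again below once the update is complete.
	 */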
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
	newcpu = !(rnp->expmaskinitnext & mask);
	rnp->expmaskinitnext |= mask;
	/* Allow lockless access for expedited grace periods. */
	smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
	ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
	rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);

	/* An incoming CPU should never be blocking a grace period. */
	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
		rcu_disable_urgency_upon_qs(rdp);
		/* Report QS -after- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(rnp->ofl_seq & 0x1);
	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}

/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the rcu_node tree's ->qsmaskinitnext bit masks.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_report_dead(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	/* QS for any half-done expedited grace period. */
	preempt_disable();
	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
	preempt_enable();
	rcu_preempt_deferred_qs(current);

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
	raw_spin_lock(&rcu_state.ofl_lock);
	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
		/* Report quiescent state -before- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	raw_spin_unlock(&rcu_state.ofl_lock);
	smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
	WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
	WARN_ON_ONCE(rnp->ofl_seq & 0x1);

	rdp->cpu_started = false;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The outgoing CPU has just passed through the dying-idle state, and we
 * are being invoked from the CPU that was IPIed to continue the offline
 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4197 */ 4198 void rcutree_migrate_callbacks(int cpu) 4199 { 4200 unsigned long flags; 4201 struct rcu_data *my_rdp; 4202 struct rcu_node *my_rnp; 4203 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4204 bool needwake; 4205 4206 if (rcu_segcblist_is_offloaded(&rdp->cblist) || 4207 rcu_segcblist_empty(&rdp->cblist)) 4208 return; /* No callbacks to migrate. */ 4209 4210 local_irq_save(flags); 4211 my_rdp = this_cpu_ptr(&rcu_data); 4212 my_rnp = my_rdp->mynode; 4213 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 4214 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies)); 4215 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 4216 /* Leverage recent GPs and set GP for new callbacks. */ 4217 needwake = rcu_advance_cbs(my_rnp, rdp) || 4218 rcu_advance_cbs(my_rnp, my_rdp); 4219 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 4220 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 4221 rcu_segcblist_disable(&rdp->cblist); 4222 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != 4223 !rcu_segcblist_n_cbs(&my_rdp->cblist)); 4224 if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) { 4225 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 4226 __call_rcu_nocb_wake(my_rdp, true, flags); 4227 } else { 4228 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ 4229 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags); 4230 } 4231 if (needwake) 4232 rcu_gp_kthread_wake(); 4233 lockdep_assert_irqs_enabled(); 4234 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 4235 !rcu_segcblist_empty(&rdp->cblist), 4236 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 4237 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 4238 rcu_segcblist_first_cb(&rdp->cblist)); 4239 } 4240 #endif 4241 4242 /* 4243 * On non-huge systems, use expedited RCU grace periods to make suspend 4244 * and hibernation run faster. 4245 */ 4246 static int rcu_pm_notify(struct notifier_block *self, 4247 unsigned long action, void *hcpu) 4248 { 4249 switch (action) { 4250 case PM_HIBERNATION_PREPARE: 4251 case PM_SUSPEND_PREPARE: 4252 rcu_expedite_gp(); 4253 break; 4254 case PM_POST_HIBERNATION: 4255 case PM_POST_SUSPEND: 4256 rcu_unexpedite_gp(); 4257 break; 4258 default: 4259 break; 4260 } 4261 return NOTIFY_OK; 4262 } 4263 4264 /* 4265 * Spawn the kthreads that handle RCU's grace periods. 4266 */ 4267 static int __init rcu_spawn_gp_kthread(void) 4268 { 4269 unsigned long flags; 4270 int kthread_prio_in = kthread_prio; 4271 struct rcu_node *rnp; 4272 struct sched_param sp; 4273 struct task_struct *t; 4274 4275 /* Force priority into range. 
 */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
		kthread_prio = 2;
	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;

	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

	rcu_scheduler_fully_active = 1;
	t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	if (kthread_prio) {
		sp.sched_priority = kthread_prio;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	rnp = rcu_get_root();
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	WRITE_ONCE(rcu_state.gp_activity, jiffies);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	// Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
	smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	wake_up_process(t);
	rcu_spawn_nocb_kthreads();
	rcu_spawn_boost_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

/*
 * This function is invoked towards the end of the scheduler's
 * initialization process. Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this
 * idle task is booting the system, and such primitives are no-ops).
 * After this function is called, any synchronous grace-period primitives
 * are run as expedited, with the requesting task driving the grace
 * period forward. A later core_initcall() rcu_set_runtime_mode() will
 * switch to full runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
}

/*
 * Helper function for rcu_init() that initializes the rcu_state structure.
 */
static void __init rcu_init_one(void)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];	/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rcu_state.level[i] =
			rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves.
 */
	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rcu_state.level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gp_seq = rcu_state.gp_seq;
			rnp->gp_seq_needed = rcu_state.gp_seq;
			rnp->completedqs = rcu_state.gp_seq;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = BIT(rnp->grpnum);
				rnp->parent = rcu_state.level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
		}
	}

	init_swait_queue_head(&rcu_state.gp_wq);
	init_swait_queue_head(&rcu_state.expedited_wq);
	rnp = rcu_first_leaf_node();
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(&rcu_data, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i);
	}
}

/*
 * Compute the rcu_node tree geometry from kernel parameters. This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
	ulong d;
	int i;
	int rcu_capacity[RCU_NUM_LVLS];

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value (which is a function of HZ), plus one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;
	adjust_jiffies_till_sched_qs();

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute the number of nodes that can be handled by an rcu_node
	 * tree with the given number of levels.
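	 *
	 * For example (illustrative arithmetic, assuming the common 64-bit
	 * defaults of RCU_FANOUT = 64 and rcu_fanout_leaf = 16): the
	 * per-level capacities are 16, 1024, 65536, ... CPUs, so a 100-CPU
	 * system needs two levels, with num_rcu_lvl[] = {1, 7}: one root
	 * plus DIV_ROUND_UP(100, 16) == 7 leaves.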
4461 */ 4462 rcu_capacity[0] = rcu_fanout_leaf; 4463 for (i = 1; i < RCU_NUM_LVLS; i++) 4464 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; 4465 4466 /* 4467 * The tree must be able to accommodate the configured number of CPUs. 4468 * If this limit is exceeded, fall back to the compile-time values. 4469 */ 4470 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) { 4471 rcu_fanout_leaf = RCU_FANOUT_LEAF; 4472 WARN_ON(1); 4473 return; 4474 } 4475 4476 /* Calculate the number of levels in the tree. */ 4477 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) { 4478 } 4479 rcu_num_lvls = i + 1; 4480 4481 /* Calculate the number of rcu_nodes at each level of the tree. */ 4482 for (i = 0; i < rcu_num_lvls; i++) { 4483 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; 4484 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); 4485 } 4486 4487 /* Calculate the total number of rcu_node structures. */ 4488 rcu_num_nodes = 0; 4489 for (i = 0; i < rcu_num_lvls; i++) 4490 rcu_num_nodes += num_rcu_lvl[i]; 4491 } 4492 4493 /* 4494 * Dump out the structure of the rcu_node combining tree associated 4495 * with the rcu_state structure. 4496 */ 4497 static void __init rcu_dump_rcu_node_tree(void) 4498 { 4499 int level = 0; 4500 struct rcu_node *rnp; 4501 4502 pr_info("rcu_node tree layout dump\n"); 4503 pr_info(" "); 4504 rcu_for_each_node_breadth_first(rnp) { 4505 if (rnp->level != level) { 4506 pr_cont("\n"); 4507 pr_info(" "); 4508 level = rnp->level; 4509 } 4510 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 4511 } 4512 pr_cont("\n"); 4513 } 4514 4515 struct workqueue_struct *rcu_gp_wq; 4516 struct workqueue_struct *rcu_par_gp_wq; 4517 4518 static void __init kfree_rcu_batch_init(void) 4519 { 4520 int cpu; 4521 int i; 4522 4523 for_each_possible_cpu(cpu) { 4524 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 4525 4526 for (i = 0; i < KFREE_N_BATCHES; i++) { 4527 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); 4528 krcp->krw_arr[i].krcp = krcp; 4529 } 4530 4531 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); 4532 INIT_WORK(&krcp->page_cache_work, fill_page_cache_func); 4533 krcp->initialized = true; 4534 } 4535 if (register_shrinker(&kfree_rcu_shrinker)) 4536 pr_err("Failed to register kfree_rcu() shrinker!\n"); 4537 } 4538 4539 void __init rcu_init(void) 4540 { 4541 int cpu; 4542 4543 rcu_early_boot_tests(); 4544 4545 kfree_rcu_batch_init(); 4546 rcu_bootup_announce(); 4547 rcu_init_geometry(); 4548 rcu_init_one(); 4549 if (dump_tree) 4550 rcu_dump_rcu_node_tree(); 4551 if (use_softirq) 4552 open_softirq(RCU_SOFTIRQ, rcu_core_si); 4553 4554 /* 4555 * We don't need protection against CPU-hotplug here because 4556 * this is called early in boot, before either interrupts 4557 * or the scheduler are operational. 4558 */ 4559 pm_notifier(rcu_pm_notify, 0); 4560 for_each_online_cpu(cpu) { 4561 rcutree_prepare_cpu(cpu); 4562 rcu_cpu_starting(cpu); 4563 rcutree_online_cpu(cpu); 4564 } 4565 4566 /* Create workqueue for expedited GPs and for Tree SRCU. */ 4567 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); 4568 WARN_ON(!rcu_gp_wq); 4569 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); 4570 WARN_ON(!rcu_par_gp_wq); 4571 srcu_init(); 4572 4573 /* Fill in default value for rcutree.qovld boot parameter. */ 4574 /* -After- the rcu_node ->lock fields are initialized! 
 */
	if (qovld < 0)
		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
	else
		qovld_calc = qovld;
}

#include "tree_stall.h"
#include "tree_exp.h"
#include "tree_plugin.h"