// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

#ifndef data_race
#define data_race(expr)							\
	({								\
		expr;							\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif

/* Data structures. */

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
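/*
 * As an illustrative example (not a recommendation): booting with
 * "rcutree.use_softirq=0" makes RCU run its core processing in per-CPU
 * rcuc kthreads instead of in RCU_SOFTIRQ context; the "rcutree."
 * prefix comes from the MODULE_PARAM_PREFIX definition above.
 */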
static bool use_softirq = true;
module_param(use_softirq, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/*
 * This rcu parameter is runtime-read-only.  It reflects
 * a minimum allowed number of objects which can be cached
 * per-CPU.  Object size is equal to one page.  This value
 * can be changed at boot time.
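 *
 * For example (an illustrative value only), booting with
 * "rcutree.rcu_min_cached_objs=4" would raise the per-CPU cache to
 * four pages; because the module_param() below uses mode 0444, the
 * value cannot be changed after boot.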
186 */ 187 static int rcu_min_cached_objs = 2; 188 module_param(rcu_min_cached_objs, int, 0444); 189 190 /* Retrieve RCU kthreads priority for rcutorture */ 191 int rcu_get_gp_kthreads_prio(void) 192 { 193 return kthread_prio; 194 } 195 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); 196 197 /* 198 * Number of grace periods between delays, normalized by the duration of 199 * the delay. The longer the delay, the more the grace periods between 200 * each delay. The reason for this normalization is that it means that, 201 * for non-zero delays, the overall slowdown of grace periods is constant 202 * regardless of the duration of the delay. This arrangement balances 203 * the need for long delays to increase some race probabilities with the 204 * need for fast grace periods to increase other race probabilities. 205 */ 206 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ 207 208 /* 209 * Compute the mask of online CPUs for the specified rcu_node structure. 210 * This will not be stable unless the rcu_node structure's ->lock is 211 * held, but the bit corresponding to the current CPU will be stable 212 * in most contexts. 213 */ 214 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 215 { 216 return READ_ONCE(rnp->qsmaskinitnext); 217 } 218 219 /* 220 * Return true if an RCU grace period is in progress. The READ_ONCE()s 221 * permit this function to be invoked without holding the root rcu_node 222 * structure's ->lock, but of course results can be subject to change. 223 */ 224 static int rcu_gp_in_progress(void) 225 { 226 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); 227 } 228 229 /* 230 * Return the number of callbacks queued on the specified CPU. 231 * Handles both the nocbs and normal cases. 232 */ 233 static long rcu_get_n_cbs_cpu(int cpu) 234 { 235 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 236 237 if (rcu_segcblist_is_enabled(&rdp->cblist)) 238 return rcu_segcblist_n_cbs(&rdp->cblist); 239 return 0; 240 } 241 242 void rcu_softirq_qs(void) 243 { 244 rcu_qs(); 245 rcu_preempt_deferred_qs(current); 246 } 247 248 /* 249 * Record entry into an extended quiescent state. This is only to be 250 * called when not already in an extended quiescent state, that is, 251 * RCU is watching prior to the call to this function and is no longer 252 * watching upon return. 253 */ 254 static noinstr void rcu_dynticks_eqs_enter(void) 255 { 256 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 257 int seq; 258 259 /* 260 * CPUs seeing atomic_add_return() must see prior RCU read-side 261 * critical sections, and we also must force ordering with the 262 * next idle sojourn. 263 */ 264 rcu_dynticks_task_trace_enter(); // Before ->dynticks update! 265 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 266 // RCU is no longer watching. Better be in extended quiescent state! 267 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 268 (seq & RCU_DYNTICK_CTRL_CTR)); 269 /* Better not have special action (TLB flush) pending! */ 270 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 271 (seq & RCU_DYNTICK_CTRL_MASK)); 272 } 273 274 /* 275 * Record exit from an extended quiescent state. This is only to be 276 * called from an extended quiescent state, that is, RCU is not watching 277 * prior to the call to this function and is watching upon return. 
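 *
 * A brief worked example of the counter protocol (values are
 * illustrative): ->dynticks is initialized to RCU_DYNTICK_CTRL_CTR (0x2),
 * so the RCU_DYNTICK_CTRL_CTR bit is set and RCU is watching.
 * rcu_dynticks_eqs_enter() then adds 0x2, yielding 0x4 with that bit
 * clear (extended quiescent state), and this function adds 0x2 again,
 * yielding 0x6 with the bit set once more (RCU watching).  The
 * low-order RCU_DYNTICK_CTRL_MASK bit is reserved for the special-work
 * request set by rcu_eqs_special_set().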
278 */ 279 static noinstr void rcu_dynticks_eqs_exit(void) 280 { 281 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 282 int seq; 283 284 /* 285 * CPUs seeing atomic_add_return() must see prior idle sojourns, 286 * and we also must force ordering with the next RCU read-side 287 * critical section. 288 */ 289 seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 290 // RCU is now watching. Better not be in an extended quiescent state! 291 rcu_dynticks_task_trace_exit(); // After ->dynticks update! 292 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 293 !(seq & RCU_DYNTICK_CTRL_CTR)); 294 if (seq & RCU_DYNTICK_CTRL_MASK) { 295 arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); 296 smp_mb__after_atomic(); /* _exit after clearing mask. */ 297 } 298 } 299 300 /* 301 * Reset the current CPU's ->dynticks counter to indicate that the 302 * newly onlined CPU is no longer in an extended quiescent state. 303 * This will either leave the counter unchanged, or increment it 304 * to the next non-quiescent value. 305 * 306 * The non-atomic test/increment sequence works because the upper bits 307 * of the ->dynticks counter are manipulated only by the corresponding CPU, 308 * or when the corresponding CPU is offline. 309 */ 310 static void rcu_dynticks_eqs_online(void) 311 { 312 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 313 314 if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR) 315 return; 316 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); 317 } 318 319 /* 320 * Is the current CPU in an extended quiescent state? 321 * 322 * No ordering, as we are sampling CPU-local information. 323 */ 324 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) 325 { 326 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 327 328 return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); 329 } 330 331 /* 332 * Snapshot the ->dynticks counter with full ordering so as to allow 333 * stable comparison of this counter with past and future snapshots. 334 */ 335 static int rcu_dynticks_snap(struct rcu_data *rdp) 336 { 337 int snap = atomic_add_return(0, &rdp->dynticks); 338 339 return snap & ~RCU_DYNTICK_CTRL_MASK; 340 } 341 342 /* 343 * Return true if the snapshot returned from rcu_dynticks_snap() 344 * indicates that RCU is in an extended quiescent state. 345 */ 346 static bool rcu_dynticks_in_eqs(int snap) 347 { 348 return !(snap & RCU_DYNTICK_CTRL_CTR); 349 } 350 351 /* 352 * Return true if the CPU corresponding to the specified rcu_data 353 * structure has spent some time in an extended quiescent state since 354 * rcu_dynticks_snap() returned the specified snapshot. 355 */ 356 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) 357 { 358 return snap != rcu_dynticks_snap(rdp); 359 } 360 361 /* 362 * Return true if the referenced integer is zero while the specified 363 * CPU remains within a single extended quiescent state. 364 */ 365 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) 366 { 367 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 368 int snap; 369 370 // If not quiescent, force back to earlier extended quiescent state. 371 snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK | 372 RCU_DYNTICK_CTRL_CTR); 373 374 smp_rmb(); // Order ->dynticks and *vp reads. 375 if (READ_ONCE(*vp)) 376 return false; // Non-zero, so report failure; 377 smp_rmb(); // Order *vp read and ->dynticks re-read. 378 379 // If still in the same extended quiescent state, we are good! 
380 return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK); 381 } 382 383 /* 384 * Set the special (bottom) bit of the specified CPU so that it 385 * will take special action (such as flushing its TLB) on the 386 * next exit from an extended quiescent state. Returns true if 387 * the bit was successfully set, or false if the CPU was not in 388 * an extended quiescent state. 389 */ 390 bool rcu_eqs_special_set(int cpu) 391 { 392 int old; 393 int new; 394 int new_old; 395 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 396 397 new_old = atomic_read(&rdp->dynticks); 398 do { 399 old = new_old; 400 if (old & RCU_DYNTICK_CTRL_CTR) 401 return false; 402 new = old | RCU_DYNTICK_CTRL_MASK; 403 new_old = atomic_cmpxchg(&rdp->dynticks, old, new); 404 } while (new_old != old); 405 return true; 406 } 407 408 /* 409 * Let the RCU core know that this CPU has gone through the scheduler, 410 * which is a quiescent state. This is called when the need for a 411 * quiescent state is urgent, so we burn an atomic operation and full 412 * memory barriers to let the RCU core know about it, regardless of what 413 * this CPU might (or might not) do in the near future. 414 * 415 * We inform the RCU core by emulating a zero-duration dyntick-idle period. 416 * 417 * The caller must have disabled interrupts and must not be idle. 418 */ 419 void rcu_momentary_dyntick_idle(void) 420 { 421 int special; 422 423 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); 424 special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, 425 &this_cpu_ptr(&rcu_data)->dynticks); 426 /* It is illegal to call this from idle state. */ 427 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR)); 428 rcu_preempt_deferred_qs(current); 429 } 430 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle); 431 432 /** 433 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle 434 * 435 * If the current CPU is idle and running at a first-level (not nested) 436 * interrupt, or directly, from idle, return true. 437 * 438 * The caller must have at least disabled IRQs. 439 */ 440 static int rcu_is_cpu_rrupt_from_idle(void) 441 { 442 long nesting; 443 444 /* 445 * Usually called from the tick; but also used from smp_function_call() 446 * for expedited grace periods. This latter can result in running from 447 * the idle task, instead of an actual IPI. 448 */ 449 lockdep_assert_irqs_disabled(); 450 451 /* Check for counter underflows */ 452 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0, 453 "RCU dynticks_nesting counter underflow!"); 454 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0, 455 "RCU dynticks_nmi_nesting counter underflow/zero!"); 456 457 /* Are we at first interrupt nesting level? */ 458 nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting); 459 if (nesting > 1) 460 return false; 461 462 /* 463 * If we're not in an interrupt, we must be in the idle task! 464 */ 465 WARN_ON_ONCE(!nesting && !is_idle_task(current)); 466 467 /* Does CPU appear to be idle from an RCU standpoint? */ 468 return __this_cpu_read(rcu_data.dynticks_nesting) == 0; 469 } 470 471 #define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch ... */ 472 #define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */ 473 static long blimit = DEFAULT_RCU_BLIMIT; 474 #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */ 475 static long qhimark = DEFAULT_RCU_QHIMARK; 476 #define DEFAULT_RCU_QLOMARK 100 /* Once only this many pending, use blimit. 
*/ 477 static long qlowmark = DEFAULT_RCU_QLOMARK; 478 #define DEFAULT_RCU_QOVLD_MULT 2 479 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK) 480 static long qovld = DEFAULT_RCU_QOVLD; /* If this many pending, hammer QS. */ 481 static long qovld_calc = -1; /* No pre-initialization lock acquisitions! */ 482 483 module_param(blimit, long, 0444); 484 module_param(qhimark, long, 0444); 485 module_param(qlowmark, long, 0444); 486 module_param(qovld, long, 0444); 487 488 static ulong jiffies_till_first_fqs = ULONG_MAX; 489 static ulong jiffies_till_next_fqs = ULONG_MAX; 490 static bool rcu_kick_kthreads; 491 static int rcu_divisor = 7; 492 module_param(rcu_divisor, int, 0644); 493 494 /* Force an exit from rcu_do_batch() after 3 milliseconds. */ 495 static long rcu_resched_ns = 3 * NSEC_PER_MSEC; 496 module_param(rcu_resched_ns, long, 0644); 497 498 /* 499 * How long the grace period must be before we start recruiting 500 * quiescent-state help from rcu_note_context_switch(). 501 */ 502 static ulong jiffies_till_sched_qs = ULONG_MAX; 503 module_param(jiffies_till_sched_qs, ulong, 0444); 504 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */ 505 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */ 506 507 /* 508 * Make sure that we give the grace-period kthread time to detect any 509 * idle CPUs before taking active measures to force quiescent states. 510 * However, don't go below 100 milliseconds, adjusted upwards for really 511 * large systems. 512 */ 513 static void adjust_jiffies_till_sched_qs(void) 514 { 515 unsigned long j; 516 517 /* If jiffies_till_sched_qs was specified, respect the request. */ 518 if (jiffies_till_sched_qs != ULONG_MAX) { 519 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); 520 return; 521 } 522 /* Otherwise, set to third fqs scan, but bound below on large system. */ 523 j = READ_ONCE(jiffies_till_first_fqs) + 524 2 * READ_ONCE(jiffies_till_next_fqs); 525 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) 526 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 527 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j); 528 WRITE_ONCE(jiffies_to_sched_qs, j); 529 } 530 531 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp) 532 { 533 ulong j; 534 int ret = kstrtoul(val, 0, &j); 535 536 if (!ret) { 537 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j); 538 adjust_jiffies_till_sched_qs(); 539 } 540 return ret; 541 } 542 543 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp) 544 { 545 ulong j; 546 int ret = kstrtoul(val, 0, &j); 547 548 if (!ret) { 549 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1)); 550 adjust_jiffies_till_sched_qs(); 551 } 552 return ret; 553 } 554 555 static struct kernel_param_ops first_fqs_jiffies_ops = { 556 .set = param_set_first_fqs_jiffies, 557 .get = param_get_ulong, 558 }; 559 560 static struct kernel_param_ops next_fqs_jiffies_ops = { 561 .set = param_set_next_fqs_jiffies, 562 .get = param_get_ulong, 563 }; 564 565 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644); 566 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644); 567 module_param(rcu_kick_kthreads, bool, 0644); 568 569 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); 570 static int rcu_pending(int user); 571 572 /* 573 * Return the number of RCU GPs completed thus far for debug & stats. 
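 * The value is in rcu_seq format (see the rcu_seq helpers in rcu.h):
 * the low RCU_SEQ_CTR_SHIFT bits hold the grace-period phase and are
 * nonzero while a grace period is in progress, and the upper bits count
 * grace periods, so consecutive completed grace periods differ by
 * (1 << RCU_SEQ_CTR_SHIFT).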
574 */ 575 unsigned long rcu_get_gp_seq(void) 576 { 577 return READ_ONCE(rcu_state.gp_seq); 578 } 579 EXPORT_SYMBOL_GPL(rcu_get_gp_seq); 580 581 /* 582 * Return the number of RCU expedited batches completed thus far for 583 * debug & stats. Odd numbers mean that a batch is in progress, even 584 * numbers mean idle. The value returned will thus be roughly double 585 * the cumulative batches since boot. 586 */ 587 unsigned long rcu_exp_batches_completed(void) 588 { 589 return rcu_state.expedited_sequence; 590 } 591 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed); 592 593 /* 594 * Return the root node of the rcu_state structure. 595 */ 596 static struct rcu_node *rcu_get_root(void) 597 { 598 return &rcu_state.node[0]; 599 } 600 601 /* 602 * Send along grace-period-related data for rcutorture diagnostics. 603 */ 604 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 605 unsigned long *gp_seq) 606 { 607 switch (test_type) { 608 case RCU_FLAVOR: 609 *flags = READ_ONCE(rcu_state.gp_flags); 610 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); 611 break; 612 default: 613 break; 614 } 615 } 616 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 617 618 /* 619 * Enter an RCU extended quiescent state, which can be either the 620 * idle loop or adaptive-tickless usermode execution. 621 * 622 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for 623 * the possibility of usermode upcalls having messed up our count 624 * of interrupt nesting level during the prior busy period. 625 */ 626 static noinstr void rcu_eqs_enter(bool user) 627 { 628 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 629 630 WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); 631 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); 632 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 633 rdp->dynticks_nesting == 0); 634 if (rdp->dynticks_nesting != 1) { 635 // RCU will still be watching, so just do accounting and leave. 636 rdp->dynticks_nesting--; 637 return; 638 } 639 640 lockdep_assert_irqs_disabled(); 641 instrumentation_begin(); 642 trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); 643 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 644 rdp = this_cpu_ptr(&rcu_data); 645 do_nocb_deferred_wakeup(rdp); 646 rcu_prepare_for_idle(); 647 rcu_preempt_deferred_qs(current); 648 649 // instrumentation for the noinstr rcu_dynticks_eqs_enter() 650 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 651 652 instrumentation_end(); 653 WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ 654 // RCU is watching here ... 655 rcu_dynticks_eqs_enter(); 656 // ... but is no longer watching here. 657 rcu_dynticks_task_enter(); 658 } 659 660 /** 661 * rcu_idle_enter - inform RCU that current CPU is entering idle 662 * 663 * Enter idle mode, in other words, -leave- the mode in which RCU 664 * read-side critical sections can occur. (Though RCU read-side 665 * critical sections can occur in irq handlers in idle, a possibility 666 * handled by irq_enter() and irq_exit().) 667 * 668 * If you add or remove a call to rcu_idle_enter(), be sure to test with 669 * CONFIG_RCU_EQS_DEBUG=y. 670 */ 671 void rcu_idle_enter(void) 672 { 673 lockdep_assert_irqs_disabled(); 674 rcu_eqs_enter(false); 675 } 676 EXPORT_SYMBOL_GPL(rcu_idle_enter); 677 678 #ifdef CONFIG_NO_HZ_FULL 679 /** 680 * rcu_user_enter - inform RCU that we are resuming userspace. 681 * 682 * Enter RCU idle mode right before resuming userspace. 
No use of RCU 683 * is permitted between this call and rcu_user_exit(). This way the 684 * CPU doesn't need to maintain the tick for RCU maintenance purposes 685 * when the CPU runs in userspace. 686 * 687 * If you add or remove a call to rcu_user_enter(), be sure to test with 688 * CONFIG_RCU_EQS_DEBUG=y. 689 */ 690 noinstr void rcu_user_enter(void) 691 { 692 lockdep_assert_irqs_disabled(); 693 rcu_eqs_enter(true); 694 } 695 #endif /* CONFIG_NO_HZ_FULL */ 696 697 /** 698 * rcu_nmi_exit - inform RCU of exit from NMI context 699 * 700 * If we are returning from the outermost NMI handler that interrupted an 701 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting 702 * to let the RCU grace-period handling know that the CPU is back to 703 * being RCU-idle. 704 * 705 * If you add or remove a call to rcu_nmi_exit(), be sure to test 706 * with CONFIG_RCU_EQS_DEBUG=y. 707 */ 708 noinstr void rcu_nmi_exit(void) 709 { 710 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 711 712 instrumentation_begin(); 713 /* 714 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. 715 * (We are exiting an NMI handler, so RCU better be paying attention 716 * to us!) 717 */ 718 WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); 719 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()); 720 721 /* 722 * If the nesting level is not 1, the CPU wasn't RCU-idle, so 723 * leave it in non-RCU-idle state. 724 */ 725 if (rdp->dynticks_nmi_nesting != 1) { 726 trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, 727 atomic_read(&rdp->dynticks)); 728 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ 729 rdp->dynticks_nmi_nesting - 2); 730 instrumentation_end(); 731 return; 732 } 733 734 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ 735 trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); 736 WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ 737 738 if (!in_nmi()) 739 rcu_prepare_for_idle(); 740 741 // instrumentation for the noinstr rcu_dynticks_eqs_enter() 742 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 743 instrumentation_end(); 744 745 // RCU is watching here ... 746 rcu_dynticks_eqs_enter(); 747 // ... but is no longer watching here. 748 749 if (!in_nmi()) 750 rcu_dynticks_task_enter(); 751 } 752 753 /** 754 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle 755 * 756 * Exit from an interrupt handler, which might possibly result in entering 757 * idle mode, in other words, leaving the mode in which read-side critical 758 * sections can occur. The caller must have disabled interrupts. 759 * 760 * This code assumes that the idle loop never does anything that might 761 * result in unbalanced calls to irq_enter() and irq_exit(). If your 762 * architecture's idle loop violates this assumption, RCU will give you what 763 * you deserve, good and hard. But very infrequently and irreproducibly. 764 * 765 * Use things like work queues to work around this limitation. 766 * 767 * You have been warned. 768 * 769 * If you add or remove a call to rcu_irq_exit(), be sure to test with 770 * CONFIG_RCU_EQS_DEBUG=y. 771 */ 772 void noinstr rcu_irq_exit(void) 773 { 774 lockdep_assert_irqs_disabled(); 775 rcu_nmi_exit(); 776 } 777 778 /** 779 * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq 780 * towards in kernel preemption 781 * 782 * Same as rcu_irq_exit() but has a sanity check that scheduling is safe 783 * from RCU point of view. 
Invoked from return from interrupt before kernel 784 * preemption. 785 */ 786 void rcu_irq_exit_preempt(void) 787 { 788 lockdep_assert_irqs_disabled(); 789 rcu_nmi_exit(); 790 791 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, 792 "RCU dynticks_nesting counter underflow/zero!"); 793 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 794 DYNTICK_IRQ_NONIDLE, 795 "Bad RCU dynticks_nmi_nesting counter\n"); 796 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), 797 "RCU in extended quiescent state!"); 798 } 799 800 #ifdef CONFIG_PROVE_RCU 801 /** 802 * rcu_irq_exit_check_preempt - Validate that scheduling is possible 803 */ 804 void rcu_irq_exit_check_preempt(void) 805 { 806 lockdep_assert_irqs_disabled(); 807 808 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0, 809 "RCU dynticks_nesting counter underflow/zero!"); 810 RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 811 DYNTICK_IRQ_NONIDLE, 812 "Bad RCU dynticks_nmi_nesting counter\n"); 813 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), 814 "RCU in extended quiescent state!"); 815 } 816 #endif /* #ifdef CONFIG_PROVE_RCU */ 817 818 /* 819 * Wrapper for rcu_irq_exit() where interrupts are enabled. 820 * 821 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test 822 * with CONFIG_RCU_EQS_DEBUG=y. 823 */ 824 void rcu_irq_exit_irqson(void) 825 { 826 unsigned long flags; 827 828 local_irq_save(flags); 829 rcu_irq_exit(); 830 local_irq_restore(flags); 831 } 832 833 /* 834 * Exit an RCU extended quiescent state, which can be either the 835 * idle loop or adaptive-tickless usermode execution. 836 * 837 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to 838 * allow for the possibility of usermode upcalls messing up our count of 839 * interrupt nesting level during the busy period that is just now starting. 840 */ 841 static void noinstr rcu_eqs_exit(bool user) 842 { 843 struct rcu_data *rdp; 844 long oldval; 845 846 lockdep_assert_irqs_disabled(); 847 rdp = this_cpu_ptr(&rcu_data); 848 oldval = rdp->dynticks_nesting; 849 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); 850 if (oldval) { 851 // RCU was already watching, so just do accounting and leave. 852 rdp->dynticks_nesting++; 853 return; 854 } 855 rcu_dynticks_task_exit(); 856 // RCU is not watching here ... 857 rcu_dynticks_eqs_exit(); 858 // ... but is watching here. 859 instrumentation_begin(); 860 861 // instrumentation for the noinstr rcu_dynticks_eqs_exit() 862 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 863 864 rcu_cleanup_after_idle(); 865 trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); 866 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 867 WRITE_ONCE(rdp->dynticks_nesting, 1); 868 WARN_ON_ONCE(rdp->dynticks_nmi_nesting); 869 WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); 870 instrumentation_end(); 871 } 872 873 /** 874 * rcu_idle_exit - inform RCU that current CPU is leaving idle 875 * 876 * Exit idle mode, in other words, -enter- the mode in which RCU 877 * read-side critical sections can occur. 878 * 879 * If you add or remove a call to rcu_idle_exit(), be sure to test with 880 * CONFIG_RCU_EQS_DEBUG=y. 
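 *
 * A sketch of the bookkeeping involved: rcu_idle_enter() took
 * ->dynticks_nesting from 1 to 0 (RCU stopped watching), and this
 * function takes it back from 0 to 1 while also crowbarring
 * ->dynticks_nmi_nesting back to DYNTICK_IRQ_NONIDLE so that
 * subsequent irq and NMI entry/exit nest correctly.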
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section at any time.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// Enabling the tick is unsafe in NMI handlers.
	if (WARN_ON_ONCE(in_nmi()))
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
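		// (The tick dependency set below is cleared again by
		// rcu_disable_urgency_upon_qs() once this CPU's quiescent
		// state has been reported.)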
962 WRITE_ONCE(rdp->rcu_forced_tick, true); 963 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 964 } 965 raw_spin_unlock_rcu_node(rdp->mynode); 966 } 967 #endif /* CONFIG_NO_HZ_FULL */ 968 969 /** 970 * rcu_nmi_enter - inform RCU of entry to NMI context 971 * 972 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and 973 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know 974 * that the CPU is active. This implementation permits nested NMIs, as 975 * long as the nesting level does not overflow an int. (You will probably 976 * run out of stack space first.) 977 * 978 * If you add or remove a call to rcu_nmi_enter(), be sure to test 979 * with CONFIG_RCU_EQS_DEBUG=y. 980 */ 981 noinstr void rcu_nmi_enter(void) 982 { 983 long incby = 2; 984 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 985 986 /* Complain about underflow. */ 987 WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); 988 989 /* 990 * If idle from RCU viewpoint, atomically increment ->dynticks 991 * to mark non-idle and increment ->dynticks_nmi_nesting by one. 992 * Otherwise, increment ->dynticks_nmi_nesting by two. This means 993 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed 994 * to be in the outermost NMI handler that interrupted an RCU-idle 995 * period (observation due to Andy Lutomirski). 996 */ 997 if (rcu_dynticks_curr_cpu_in_eqs()) { 998 999 if (!in_nmi()) 1000 rcu_dynticks_task_exit(); 1001 1002 // RCU is not watching here ... 1003 rcu_dynticks_eqs_exit(); 1004 // ... but is watching here. 1005 1006 if (!in_nmi()) { 1007 instrumentation_begin(); 1008 rcu_cleanup_after_idle(); 1009 instrumentation_end(); 1010 } 1011 1012 instrumentation_begin(); 1013 // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs() 1014 instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); 1015 // instrumentation for the noinstr rcu_dynticks_eqs_exit() 1016 instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); 1017 1018 incby = 1; 1019 } else if (!in_nmi()) { 1020 instrumentation_begin(); 1021 rcu_irq_enter_check_tick(); 1022 instrumentation_end(); 1023 } else { 1024 instrumentation_begin(); 1025 } 1026 1027 trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), 1028 rdp->dynticks_nmi_nesting, 1029 rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); 1030 instrumentation_end(); 1031 WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ 1032 rdp->dynticks_nmi_nesting + incby); 1033 barrier(); 1034 } 1035 1036 /** 1037 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle 1038 * 1039 * Enter an interrupt handler, which might possibly result in exiting 1040 * idle mode, in other words, entering the mode in which read-side critical 1041 * sections can occur. The caller must have disabled interrupts. 1042 * 1043 * Note that the Linux kernel is fully capable of entering an interrupt 1044 * handler that it never exits, for example when doing upcalls to user mode! 1045 * This code assumes that the idle loop never does upcalls to user mode. 1046 * If your architecture's idle loop does do upcalls to user mode (or does 1047 * anything else that results in unbalanced calls to the irq_enter() and 1048 * irq_exit() functions), RCU will give you what you deserve, good and hard. 1049 * But very infrequently and irreproducibly. 1050 * 1051 * Use things like work queues to work around this limitation. 1052 * 1053 * You have been warned. 
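 *
 * A worked example of the ->dynticks_nmi_nesting bookkeeping performed
 * by rcu_nmi_enter() above: an interrupt taken from idle raises the
 * count from 0 to 1 and exits the extended quiescent state; an NMI
 * arriving on top of that interrupt raises it from 1 to 3; the NMI's
 * rcu_nmi_exit() drops it back to 1; and the interrupt's rcu_irq_exit()
 * then sees 1, writes 0, and re-enters the extended quiescent state.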
1054 * 1055 * If you add or remove a call to rcu_irq_enter(), be sure to test with 1056 * CONFIG_RCU_EQS_DEBUG=y. 1057 */ 1058 noinstr void rcu_irq_enter(void) 1059 { 1060 lockdep_assert_irqs_disabled(); 1061 rcu_nmi_enter(); 1062 } 1063 1064 /* 1065 * Wrapper for rcu_irq_enter() where interrupts are enabled. 1066 * 1067 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test 1068 * with CONFIG_RCU_EQS_DEBUG=y. 1069 */ 1070 void rcu_irq_enter_irqson(void) 1071 { 1072 unsigned long flags; 1073 1074 local_irq_save(flags); 1075 rcu_irq_enter(); 1076 local_irq_restore(flags); 1077 } 1078 1079 /* 1080 * If any sort of urgency was applied to the current CPU (for example, 1081 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order 1082 * to get to a quiescent state, disable it. 1083 */ 1084 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) 1085 { 1086 raw_lockdep_assert_held_rcu_node(rdp->mynode); 1087 WRITE_ONCE(rdp->rcu_urgent_qs, false); 1088 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); 1089 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { 1090 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); 1091 WRITE_ONCE(rdp->rcu_forced_tick, false); 1092 } 1093 } 1094 1095 noinstr bool __rcu_is_watching(void) 1096 { 1097 return !rcu_dynticks_curr_cpu_in_eqs(); 1098 } 1099 1100 /** 1101 * rcu_is_watching - see if RCU thinks that the current CPU is not idle 1102 * 1103 * Return true if RCU is watching the running CPU, which means that this 1104 * CPU can safely enter RCU read-side critical sections. In other words, 1105 * if the current CPU is not in its idle loop or is in an interrupt or 1106 * NMI handler, return true. 1107 */ 1108 bool rcu_is_watching(void) 1109 { 1110 bool ret; 1111 1112 preempt_disable_notrace(); 1113 ret = !rcu_dynticks_curr_cpu_in_eqs(); 1114 preempt_enable_notrace(); 1115 return ret; 1116 } 1117 EXPORT_SYMBOL_GPL(rcu_is_watching); 1118 1119 /* 1120 * If a holdout task is actually running, request an urgent quiescent 1121 * state from its CPU. This is unsynchronized, so migrations can cause 1122 * the request to go to the wrong CPU. Which is OK, all that will happen 1123 * is that the CPU's next context switch will be a bit slower and next 1124 * time around this task will generate another request. 1125 */ 1126 void rcu_request_urgent_qs_task(struct task_struct *t) 1127 { 1128 int cpu; 1129 1130 barrier(); 1131 cpu = task_cpu(t); 1132 if (!task_curr(t)) 1133 return; /* This task is not running on that CPU. */ 1134 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true); 1135 } 1136 1137 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 1138 1139 /* 1140 * Is the current CPU online as far as RCU is concerned? 1141 * 1142 * Disable preemption to avoid false positives that could otherwise 1143 * happen due to the current CPU number being sampled, this task being 1144 * preempted, its old CPU being taken offline, resuming on some other CPU, 1145 * then determining that its old CPU is now offline. 1146 * 1147 * Disable checking if in an NMI handler because we cannot safely 1148 * report errors from NMI handlers anyway. In addition, it is OK to use 1149 * RCU on an offline processor during initial boot, hence the check for 1150 * rcu_scheduler_fully_active. 
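 *
 * For example, the lockdep-based rcu_read_lock*_held() helpers consult
 * this function so that RCU usage from a CPU that RCU believes to be
 * offline can be flagged.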
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/* If waiting too long on an offline CPU, complain. */
	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
	    time_after(jiffies, rcu_state.gp_start + HZ)) {
		bool onl;
		struct rcu_node *rnp1;

		WARN_ON(1); /* Offline CPUs are supposed to report QS!
*/ 1239 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", 1240 __func__, rnp->grplo, rnp->grphi, rnp->level, 1241 (long)rnp->gp_seq, (long)rnp->completedqs); 1242 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) 1243 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n", 1244 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask); 1245 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); 1246 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n", 1247 __func__, rdp->cpu, ".o"[onl], 1248 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, 1249 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); 1250 return 1; /* Break things loose after complaining. */ 1251 } 1252 1253 /* 1254 * A CPU running for an extended time within the kernel can 1255 * delay RCU grace periods: (1) At age jiffies_to_sched_qs, 1256 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set 1257 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the 1258 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs 1259 * variable are safe because the assignments are repeated if this 1260 * CPU failed to pass through a quiescent state. This code 1261 * also checks .jiffies_resched in case jiffies_to_sched_qs 1262 * is set way high. 1263 */ 1264 jtsq = READ_ONCE(jiffies_to_sched_qs); 1265 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); 1266 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); 1267 if (!READ_ONCE(*rnhqp) && 1268 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || 1269 time_after(jiffies, rcu_state.jiffies_resched) || 1270 rcu_state.cbovld)) { 1271 WRITE_ONCE(*rnhqp, true); 1272 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ 1273 smp_store_release(ruqp, true); 1274 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { 1275 WRITE_ONCE(*ruqp, true); 1276 } 1277 1278 /* 1279 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! 1280 * The above code handles this, but only for straight cond_resched(). 1281 * And some in-kernel loops check need_resched() before calling 1282 * cond_resched(), which defeats the above code for CPUs that are 1283 * running in-kernel with scheduling-clock interrupts disabled. 1284 * So hit them over the head with the resched_cpu() hammer! 1285 */ 1286 if (tick_nohz_full_cpu(rdp->cpu) && 1287 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || 1288 rcu_state.cbovld)) { 1289 WRITE_ONCE(*ruqp, true); 1290 resched_cpu(rdp->cpu); 1291 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1292 } 1293 1294 /* 1295 * If more than halfway to RCU CPU stall-warning time, invoke 1296 * resched_cpu() more frequently to try to loosen things up a bit. 1297 * Also check to see if the CPU is getting hammered with interrupts, 1298 * but only once per grace period, just to keep the IPIs down to 1299 * a dull roar. 
1300 */ 1301 if (time_after(jiffies, rcu_state.jiffies_resched)) { 1302 if (time_after(jiffies, 1303 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { 1304 resched_cpu(rdp->cpu); 1305 WRITE_ONCE(rdp->last_fqs_resched, jiffies); 1306 } 1307 if (IS_ENABLED(CONFIG_IRQ_WORK) && 1308 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && 1309 (rnp->ffmask & rdp->grpmask)) { 1310 init_irq_work(&rdp->rcu_iw, rcu_iw_handler); 1311 atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ); 1312 rdp->rcu_iw_pending = true; 1313 rdp->rcu_iw_gp_seq = rnp->gp_seq; 1314 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); 1315 } 1316 } 1317 1318 return 0; 1319 } 1320 1321 /* Trace-event wrapper function for trace_rcu_future_grace_period. */ 1322 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, 1323 unsigned long gp_seq_req, const char *s) 1324 { 1325 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 1326 gp_seq_req, rnp->level, 1327 rnp->grplo, rnp->grphi, s); 1328 } 1329 1330 /* 1331 * rcu_start_this_gp - Request the start of a particular grace period 1332 * @rnp_start: The leaf node of the CPU from which to start. 1333 * @rdp: The rcu_data corresponding to the CPU from which to start. 1334 * @gp_seq_req: The gp_seq of the grace period to start. 1335 * 1336 * Start the specified grace period, as needed to handle newly arrived 1337 * callbacks. The required future grace periods are recorded in each 1338 * rcu_node structure's ->gp_seq_needed field. Returns true if there 1339 * is reason to awaken the grace-period kthread. 1340 * 1341 * The caller must hold the specified rcu_node structure's ->lock, which 1342 * is why the caller is responsible for waking the grace-period kthread. 1343 * 1344 * Returns true if the GP thread needs to be awakened else false. 1345 */ 1346 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, 1347 unsigned long gp_seq_req) 1348 { 1349 bool ret = false; 1350 struct rcu_node *rnp; 1351 1352 /* 1353 * Use funnel locking to either acquire the root rcu_node 1354 * structure's lock or bail out if the need for this grace period 1355 * has already been recorded -- or if that grace period has in 1356 * fact already started. If there is already a grace period in 1357 * progress in a non-leaf node, no recording is needed because the 1358 * end of the grace period will scan the leaf rcu_node structures. 1359 * Note that rnp_start->lock must not be released. 1360 */ 1361 raw_lockdep_assert_held_rcu_node(rnp_start); 1362 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); 1363 for (rnp = rnp_start; 1; rnp = rnp->parent) { 1364 if (rnp != rnp_start) 1365 raw_spin_lock_rcu_node(rnp); 1366 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || 1367 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || 1368 (rnp != rnp_start && 1369 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { 1370 trace_rcu_this_gp(rnp, rdp, gp_seq_req, 1371 TPS("Prestarted")); 1372 goto unlock_out; 1373 } 1374 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); 1375 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { 1376 /* 1377 * We just marked the leaf or internal node, and a 1378 * grace period is in progress, which means that 1379 * rcu_gp_cleanup() will see the marking. Bail to 1380 * reduce contention. 1381 */ 1382 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, 1383 TPS("Startedleaf")); 1384 goto unlock_out; 1385 } 1386 if (rnp != rnp_start && rnp->parent != NULL) 1387 raw_spin_unlock_rcu_node(rnp); 1388 if (!rnp->parent) 1389 break; /* At root, and perhaps also leaf. 
 */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_irq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
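 *
 * As an illustrative example, a callback queued while rcu_state.gp_seq
 * shows a grace period still in progress cannot be satisfied by that
 * grace period (it might already have ignored this CPU), so
 * rcu_seq_snap() conservatively tags it to wait for the end of the
 * following grace period.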
1470 * 1471 * The caller must hold rnp->lock with interrupts disabled. 1472 */ 1473 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1474 { 1475 unsigned long gp_seq_req; 1476 bool ret = false; 1477 1478 rcu_lockdep_assert_cblist_protected(rdp); 1479 raw_lockdep_assert_held_rcu_node(rnp); 1480 1481 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1482 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1483 return false; 1484 1485 /* 1486 * Callbacks are often registered with incomplete grace-period 1487 * information. Something about the fact that getting exact 1488 * information requires acquiring a global lock... RCU therefore 1489 * makes a conservative estimate of the grace period number at which 1490 * a given callback will become ready to invoke. The following 1491 * code checks this estimate and improves it when possible, thus 1492 * accelerating callback invocation to an earlier grace-period 1493 * number. 1494 */ 1495 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); 1496 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) 1497 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); 1498 1499 /* Trace depending on how much we were able to accelerate. */ 1500 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) 1501 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB")); 1502 else 1503 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB")); 1504 return ret; 1505 } 1506 1507 /* 1508 * Similar to rcu_accelerate_cbs(), but does not require that the leaf 1509 * rcu_node structure's ->lock be held. It consults the cached value 1510 * of ->gp_seq_needed in the rcu_data structure, and if that indicates 1511 * that a new grace-period request be made, invokes rcu_accelerate_cbs() 1512 * while holding the leaf rcu_node structure's ->lock. 1513 */ 1514 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, 1515 struct rcu_data *rdp) 1516 { 1517 unsigned long c; 1518 bool needwake; 1519 1520 rcu_lockdep_assert_cblist_protected(rdp); 1521 c = rcu_seq_snap(&rcu_state.gp_seq); 1522 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { 1523 /* Old request still live, so mark recent callbacks. */ 1524 (void)rcu_segcblist_accelerate(&rdp->cblist, c); 1525 return; 1526 } 1527 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 1528 needwake = rcu_accelerate_cbs(rnp, rdp); 1529 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 1530 if (needwake) 1531 rcu_gp_kthread_wake(); 1532 } 1533 1534 /* 1535 * Move any callbacks whose grace period has completed to the 1536 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1537 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL 1538 * sublist. This function is idempotent, so it does not hurt to 1539 * invoke it repeatedly. As long as it is not invoked -too- often... 1540 * Returns true if the RCU grace-period kthread needs to be awakened. 1541 * 1542 * The caller must hold rnp->lock with interrupts disabled. 1543 */ 1544 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1545 { 1546 rcu_lockdep_assert_cblist_protected(rdp); 1547 raw_lockdep_assert_held_rcu_node(rnp); 1548 1549 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1550 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1551 return false; 1552 1553 /* 1554 * Find all callbacks whose ->gp_seq numbers indicate that they 1555 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 
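	 * For example, once rnp->gp_seq shows that the grace period a
	 * callback was tagged to wait for has ended, that callback is
	 * moved to RCU_DONE_TAIL and becomes eligible for invocation.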
1556 */ 1557 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); 1558 1559 /* Classify any remaining callbacks. */ 1560 return rcu_accelerate_cbs(rnp, rdp); 1561 } 1562 1563 /* 1564 * Move and classify callbacks, but only if doing so won't require 1565 * that the RCU grace-period kthread be awakened. 1566 */ 1567 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, 1568 struct rcu_data *rdp) 1569 { 1570 rcu_lockdep_assert_cblist_protected(rdp); 1571 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || 1572 !raw_spin_trylock_rcu_node(rnp)) 1573 return; 1574 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); 1575 raw_spin_unlock_rcu_node(rnp); 1576 } 1577 1578 /* 1579 * Update CPU-local rcu_data state to record the beginnings and ends of 1580 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1581 * structure corresponding to the current CPU, and must have irqs disabled. 1582 * Returns true if the grace-period kthread needs to be awakened. 1583 */ 1584 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) 1585 { 1586 bool ret = false; 1587 bool need_qs; 1588 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 1589 rcu_segcblist_is_offloaded(&rdp->cblist); 1590 1591 raw_lockdep_assert_held_rcu_node(rnp); 1592 1593 if (rdp->gp_seq == rnp->gp_seq) 1594 return false; /* Nothing to do. */ 1595 1596 /* Handle the ends of any preceding grace periods first. */ 1597 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || 1598 unlikely(READ_ONCE(rdp->gpwrap))) { 1599 if (!offloaded) 1600 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ 1601 rdp->core_needs_qs = false; 1602 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); 1603 } else { 1604 if (!offloaded) 1605 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ 1606 if (rdp->core_needs_qs) 1607 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); 1608 } 1609 1610 /* Now handle the beginnings of any new-to-this-CPU grace periods. */ 1611 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || 1612 unlikely(READ_ONCE(rdp->gpwrap))) { 1613 /* 1614 * If the current grace period is waiting for this CPU, 1615 * set up to detect a quiescent state, otherwise don't 1616 * go looking for one. 1617 */ 1618 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); 1619 need_qs = !!(rnp->qsmask & rdp->grpmask); 1620 rdp->cpu_no_qs.b.norm = need_qs; 1621 rdp->core_needs_qs = need_qs; 1622 zero_cpu_stall_ticks(rdp); 1623 } 1624 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ 1625 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) 1626 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); 1627 WRITE_ONCE(rdp->gpwrap, false); 1628 rcu_gpnum_ovf(rnp, rdp); 1629 return ret; 1630 } 1631 1632 static void note_gp_changes(struct rcu_data *rdp) 1633 { 1634 unsigned long flags; 1635 bool needwake; 1636 struct rcu_node *rnp; 1637 1638 local_irq_save(flags); 1639 rnp = rdp->mynode; 1640 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && 1641 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ 1642 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. 
*/ 1643 local_irq_restore(flags); 1644 return; 1645 } 1646 needwake = __note_gp_changes(rnp, rdp); 1647 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 1648 if (needwake) 1649 rcu_gp_kthread_wake(); 1650 } 1651 1652 static void rcu_gp_slow(int delay) 1653 { 1654 if (delay > 0 && 1655 !(rcu_seq_ctr(rcu_state.gp_seq) % 1656 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) 1657 schedule_timeout_idle(delay); 1658 } 1659 1660 static unsigned long sleep_duration; 1661 1662 /* Allow rcutorture to stall the grace-period kthread. */ 1663 void rcu_gp_set_torture_wait(int duration) 1664 { 1665 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0) 1666 WRITE_ONCE(sleep_duration, duration); 1667 } 1668 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait); 1669 1670 /* Actually implement the aforementioned wait. */ 1671 static void rcu_gp_torture_wait(void) 1672 { 1673 unsigned long duration; 1674 1675 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST)) 1676 return; 1677 duration = xchg(&sleep_duration, 0UL); 1678 if (duration > 0) { 1679 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration); 1680 schedule_timeout_idle(duration); 1681 pr_alert("%s: Wait complete\n", __func__); 1682 } 1683 } 1684 1685 /* 1686 * Initialize a new grace period. Return false if no grace period required. 1687 */ 1688 static bool rcu_gp_init(void) 1689 { 1690 unsigned long flags; 1691 unsigned long oldmask; 1692 unsigned long mask; 1693 struct rcu_data *rdp; 1694 struct rcu_node *rnp = rcu_get_root(); 1695 1696 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1697 raw_spin_lock_irq_rcu_node(rnp); 1698 if (!READ_ONCE(rcu_state.gp_flags)) { 1699 /* Spurious wakeup, tell caller to go back to sleep. */ 1700 raw_spin_unlock_irq_rcu_node(rnp); 1701 return false; 1702 } 1703 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ 1704 1705 if (WARN_ON_ONCE(rcu_gp_in_progress())) { 1706 /* 1707 * Grace period already in progress, don't start another. 1708 * Not supposed to be able to happen. 1709 */ 1710 raw_spin_unlock_irq_rcu_node(rnp); 1711 return false; 1712 } 1713 1714 /* Advance to a new grace period and initialize state. */ 1715 record_gp_stall_check_time(); 1716 /* Record GP times before starting GP, hence rcu_seq_start(). */ 1717 rcu_seq_start(&rcu_state.gp_seq); 1718 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 1719 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); 1720 raw_spin_unlock_irq_rcu_node(rnp); 1721 1722 /* 1723 * Apply per-leaf buffered online and offline operations to the 1724 * rcu_node tree. Note that this new grace period need not wait 1725 * for subsequent online CPUs, and that quiescent-state forcing 1726 * will handle subsequent offline CPUs. 1727 */ 1728 rcu_state.gp_state = RCU_GP_ONOFF; 1729 rcu_for_each_leaf_node(rnp) { 1730 raw_spin_lock(&rcu_state.ofl_lock); 1731 raw_spin_lock_irq_rcu_node(rnp); 1732 if (rnp->qsmaskinit == rnp->qsmaskinitnext && 1733 !rnp->wait_blkd_tasks) { 1734 /* Nothing to do on this leaf rcu_node structure. */ 1735 raw_spin_unlock_irq_rcu_node(rnp); 1736 raw_spin_unlock(&rcu_state.ofl_lock); 1737 continue; 1738 } 1739 1740 /* Record old state, apply changes to ->qsmaskinit field. */ 1741 oldmask = rnp->qsmaskinit; 1742 rnp->qsmaskinit = rnp->qsmaskinitnext; 1743 1744 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1745 if (!oldmask != !rnp->qsmaskinit) { 1746 if (!oldmask) { /* First online CPU for rcu_node. */ 1747 if (!rnp->wait_blkd_tasks) /* Ever offline? 
*/ 1748 rcu_init_new_rnp(rnp); 1749 } else if (rcu_preempt_has_tasks(rnp)) { 1750 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1751 } else { /* Last offline CPU and can propagate. */ 1752 rcu_cleanup_dead_rnp(rnp); 1753 } 1754 } 1755 1756 /* 1757 * If all waited-on tasks from prior grace period are 1758 * done, and if all this rcu_node structure's CPUs are 1759 * still offline, propagate up the rcu_node tree and 1760 * clear ->wait_blkd_tasks. Otherwise, if one of this 1761 * rcu_node structure's CPUs has since come back online, 1762 * simply clear ->wait_blkd_tasks. 1763 */ 1764 if (rnp->wait_blkd_tasks && 1765 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1766 rnp->wait_blkd_tasks = false; 1767 if (!rnp->qsmaskinit) 1768 rcu_cleanup_dead_rnp(rnp); 1769 } 1770 1771 raw_spin_unlock_irq_rcu_node(rnp); 1772 raw_spin_unlock(&rcu_state.ofl_lock); 1773 } 1774 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1775 1776 /* 1777 * Set the quiescent-state-needed bits in all the rcu_node 1778 * structures for all currently online CPUs in breadth-first 1779 * order, starting from the root rcu_node structure, relying on the 1780 * layout of the tree within the rcu_state.node[] array. Note that 1781 * other CPUs will access only the leaves of the hierarchy, thus 1782 * seeing that no grace period is in progress, at least until the 1783 * corresponding leaf node has been initialized. 1784 * 1785 * The grace period cannot complete until the initialization 1786 * process finishes, because this kthread handles both. 1787 */ 1788 rcu_state.gp_state = RCU_GP_INIT; 1789 rcu_for_each_node_breadth_first(rnp) { 1790 rcu_gp_slow(gp_init_delay); 1791 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1792 rdp = this_cpu_ptr(&rcu_data); 1793 rcu_preempt_check_blocked_tasks(rnp); 1794 rnp->qsmask = rnp->qsmaskinit; 1795 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1796 if (rnp == rdp->mynode) 1797 (void)__note_gp_changes(rnp, rdp); 1798 rcu_preempt_boost_start_gp(rnp); 1799 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1800 rnp->level, rnp->grplo, 1801 rnp->grphi, rnp->qsmask); 1802 /* Quiescent states for tasks on any now-offline CPUs. */ 1803 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1804 rnp->rcu_gp_init_mask = mask; 1805 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1806 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1807 else 1808 raw_spin_unlock_irq_rcu_node(rnp); 1809 cond_resched_tasks_rcu_qs(); 1810 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1811 } 1812 1813 return true; 1814 } 1815 1816 /* 1817 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1818 * time. 1819 */ 1820 static bool rcu_gp_fqs_check_wake(int *gfp) 1821 { 1822 struct rcu_node *rnp = rcu_get_root(); 1823 1824 // If under overload conditions, force an immediate FQS scan. 1825 if (*gfp & RCU_GP_FLAG_OVLD) 1826 return true; 1827 1828 // Someone like call_rcu() requested a force-quiescent-state scan. 1829 *gfp = READ_ONCE(rcu_state.gp_flags); 1830 if (*gfp & RCU_GP_FLAG_FQS) 1831 return true; 1832 1833 // The current grace period has completed. 1834 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1835 return true; 1836 1837 return false; 1838 } 1839 1840 /* 1841 * Do one round of quiescent-state forcing. 1842 */ 1843 static void rcu_gp_fqs(bool first_time) 1844 { 1845 struct rcu_node *rnp = rcu_get_root(); 1846 1847 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1848 rcu_state.n_force_qs++; 1849 if (first_time) { 1850 /* Collect dyntick-idle snapshots. 
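Later rounds instead use rcu_implicit_dynticks_qs() to compare against these snapshots and to handle offline CPUs.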
*/ 1851 force_qs_rnp(dyntick_save_progress_counter); 1852 } else { 1853 /* Handle dyntick-idle and offline CPUs. */ 1854 force_qs_rnp(rcu_implicit_dynticks_qs); 1855 } 1856 /* Clear flag to prevent immediate re-entry. */ 1857 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 1858 raw_spin_lock_irq_rcu_node(rnp); 1859 WRITE_ONCE(rcu_state.gp_flags, 1860 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); 1861 raw_spin_unlock_irq_rcu_node(rnp); 1862 } 1863 } 1864 1865 /* 1866 * Loop doing repeated quiescent-state forcing until the grace period ends. 1867 */ 1868 static void rcu_gp_fqs_loop(void) 1869 { 1870 bool first_gp_fqs; 1871 int gf = 0; 1872 unsigned long j; 1873 int ret; 1874 struct rcu_node *rnp = rcu_get_root(); 1875 1876 first_gp_fqs = true; 1877 j = READ_ONCE(jiffies_till_first_fqs); 1878 if (rcu_state.cbovld) 1879 gf = RCU_GP_FLAG_OVLD; 1880 ret = 0; 1881 for (;;) { 1882 if (!ret) { 1883 rcu_state.jiffies_force_qs = jiffies + j; 1884 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 1885 jiffies + (j ? 3 * j : 2)); 1886 } 1887 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1888 TPS("fqswait")); 1889 rcu_state.gp_state = RCU_GP_WAIT_FQS; 1890 ret = swait_event_idle_timeout_exclusive( 1891 rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); 1892 rcu_gp_torture_wait(); 1893 rcu_state.gp_state = RCU_GP_DOING_FQS; 1894 /* Locking provides needed memory barriers. */ 1895 /* If grace period done, leave loop. */ 1896 if (!READ_ONCE(rnp->qsmask) && 1897 !rcu_preempt_blocked_readers_cgp(rnp)) 1898 break; 1899 /* If time for quiescent-state forcing, do it. */ 1900 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || 1901 (gf & RCU_GP_FLAG_FQS)) { 1902 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1903 TPS("fqsstart")); 1904 rcu_gp_fqs(first_gp_fqs); 1905 gf = 0; 1906 if (first_gp_fqs) { 1907 first_gp_fqs = false; 1908 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; 1909 } 1910 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1911 TPS("fqsend")); 1912 cond_resched_tasks_rcu_qs(); 1913 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1914 ret = 0; /* Force full wait till next FQS. */ 1915 j = READ_ONCE(jiffies_till_next_fqs); 1916 } else { 1917 /* Deal with stray signal. */ 1918 cond_resched_tasks_rcu_qs(); 1919 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1920 WARN_ON(signal_pending(current)); 1921 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 1922 TPS("fqswaitsig")); 1923 ret = 1; /* Keep old FQS timing. */ 1924 j = jiffies; 1925 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 1926 j = 1; 1927 else 1928 j = rcu_state.jiffies_force_qs - j; 1929 gf = 0; 1930 } 1931 } 1932 } 1933 1934 /* 1935 * Clean up after the old grace period. 1936 */ 1937 static void rcu_gp_cleanup(void) 1938 { 1939 int cpu; 1940 bool needgp = false; 1941 unsigned long gp_duration; 1942 unsigned long new_gp_seq; 1943 bool offloaded; 1944 struct rcu_data *rdp; 1945 struct rcu_node *rnp = rcu_get_root(); 1946 struct swait_queue_head *sq; 1947 1948 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1949 raw_spin_lock_irq_rcu_node(rnp); 1950 rcu_state.gp_end = jiffies; 1951 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 1952 if (gp_duration > rcu_state.gp_max) 1953 rcu_state.gp_max = gp_duration; 1954 1955 /* 1956 * We know the grace period is complete, but to everyone else 1957 * it appears to still be ongoing. But it is also the case 1958 * that to everyone else it looks like there is nothing that 1959 * they can do to advance the grace period. 
It is therefore 1960 * safe for us to drop the lock in order to mark the grace 1961 * period as completed in all of the rcu_node structures. 1962 */ 1963 raw_spin_unlock_irq_rcu_node(rnp); 1964 1965 /* 1966 * Propagate new ->gp_seq value to rcu_node structures so that 1967 * other CPUs don't have to wait until the start of the next grace 1968 * period to process their callbacks. This also avoids some nasty 1969 * RCU grace-period initialization races by forcing the end of 1970 * the current grace period to be completely recorded in all of 1971 * the rcu_node structures before the beginning of the next grace 1972 * period is recorded in any of the rcu_node structures. 1973 */ 1974 new_gp_seq = rcu_state.gp_seq; 1975 rcu_seq_end(&new_gp_seq); 1976 rcu_for_each_node_breadth_first(rnp) { 1977 raw_spin_lock_irq_rcu_node(rnp); 1978 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 1979 dump_blkd_tasks(rnp, 10); 1980 WARN_ON_ONCE(rnp->qsmask); 1981 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 1982 rdp = this_cpu_ptr(&rcu_data); 1983 if (rnp == rdp->mynode) 1984 needgp = __note_gp_changes(rnp, rdp) || needgp; 1985 /* smp_mb() provided by prior unlock-lock pair. */ 1986 needgp = rcu_future_gp_cleanup(rnp) || needgp; 1987 // Reset overload indication for CPUs no longer overloaded 1988 if (rcu_is_leaf_node(rnp)) 1989 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { 1990 rdp = per_cpu_ptr(&rcu_data, cpu); 1991 check_cb_ovld_locked(rdp, rnp); 1992 } 1993 sq = rcu_nocb_gp_get(rnp); 1994 raw_spin_unlock_irq_rcu_node(rnp); 1995 rcu_nocb_gp_cleanup(sq); 1996 cond_resched_tasks_rcu_qs(); 1997 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1998 rcu_gp_slow(gp_cleanup_delay); 1999 } 2000 rnp = rcu_get_root(); 2001 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ 2002 2003 /* Declare grace period done, trace first to use old GP number. */ 2004 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); 2005 rcu_seq_end(&rcu_state.gp_seq); 2006 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); 2007 rcu_state.gp_state = RCU_GP_IDLE; 2008 /* Check for GP requests since above loop. */ 2009 rdp = this_cpu_ptr(&rcu_data); 2010 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { 2011 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, 2012 TPS("CleanupMore")); 2013 needgp = true; 2014 } 2015 /* Advance CBs to reduce false positives below. */ 2016 offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2017 rcu_segcblist_is_offloaded(&rdp->cblist); 2018 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { 2019 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); 2020 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 2021 trace_rcu_grace_period(rcu_state.name, 2022 rcu_state.gp_seq, 2023 TPS("newreq")); 2024 } else { 2025 WRITE_ONCE(rcu_state.gp_flags, 2026 rcu_state.gp_flags & RCU_GP_FLAG_INIT); 2027 } 2028 raw_spin_unlock_irq_rcu_node(rnp); 2029 } 2030 2031 /* 2032 * Body of kthread that handles grace periods. 2033 */ 2034 static int __noreturn rcu_gp_kthread(void *unused) 2035 { 2036 rcu_bind_gp_kthread(); 2037 for (;;) { 2038 2039 /* Handle grace-period start. */ 2040 for (;;) { 2041 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2042 TPS("reqwait")); 2043 rcu_state.gp_state = RCU_GP_WAIT_GPS; 2044 swait_event_idle_exclusive(rcu_state.gp_wq, 2045 READ_ONCE(rcu_state.gp_flags) & 2046 RCU_GP_FLAG_INIT); 2047 rcu_gp_torture_wait(); 2048 rcu_state.gp_state = RCU_GP_DONE_GPS; 2049 /* Locking provides needed memory barrier. 
*/ 2050 if (rcu_gp_init()) 2051 break; 2052 cond_resched_tasks_rcu_qs(); 2053 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2054 WARN_ON(signal_pending(current)); 2055 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2056 TPS("reqwaitsig")); 2057 } 2058 2059 /* Handle quiescent-state forcing. */ 2060 rcu_gp_fqs_loop(); 2061 2062 /* Handle grace-period end. */ 2063 rcu_state.gp_state = RCU_GP_CLEANUP; 2064 rcu_gp_cleanup(); 2065 rcu_state.gp_state = RCU_GP_CLEANED; 2066 } 2067 } 2068 2069 /* 2070 * Report a full set of quiescent states to the rcu_state data structure. 2071 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 2072 * another grace period is required. Whether we wake the grace-period 2073 * kthread or it awakens itself for the next round of quiescent-state 2074 * forcing, that kthread will clean up after the just-completed grace 2075 * period. Note that the caller must hold rnp->lock, which is released 2076 * before return. 2077 */ 2078 static void rcu_report_qs_rsp(unsigned long flags) 2079 __releases(rcu_get_root()->lock) 2080 { 2081 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 2082 WARN_ON_ONCE(!rcu_gp_in_progress()); 2083 WRITE_ONCE(rcu_state.gp_flags, 2084 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2085 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 2086 rcu_gp_kthread_wake(); 2087 } 2088 2089 /* 2090 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2091 * Allows quiescent states for a group of CPUs to be reported at one go 2092 * to the specified rcu_node structure, though all the CPUs in the group 2093 * must be represented by the same rcu_node structure (which need not be a 2094 * leaf rcu_node structure, though it often will be). The gps parameter 2095 * is the grace-period snapshot, which means that the quiescent states 2096 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 2097 * must be held upon entry, and it is released before return. 2098 * 2099 * As a special case, if mask is zero, the bit-already-cleared check is 2100 * disabled. This allows propagating quiescent state due to resumed tasks 2101 * during grace-period initialization. 2102 */ 2103 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 2104 unsigned long gps, unsigned long flags) 2105 __releases(rnp->lock) 2106 { 2107 unsigned long oldmask = 0; 2108 struct rcu_node *rnp_c; 2109 2110 raw_lockdep_assert_held_rcu_node(rnp); 2111 2112 /* Walk up the rcu_node hierarchy. */ 2113 for (;;) { 2114 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 2115 2116 /* 2117 * Our bit has already been cleared, or the 2118 * relevant grace period is already over, so done. 2119 */ 2120 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2121 return; 2122 } 2123 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2124 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 2125 rcu_preempt_blocked_readers_cgp(rnp)); 2126 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 2127 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 2128 mask, rnp->qsmask, rnp->level, 2129 rnp->grplo, rnp->grphi, 2130 !!rnp->gp_tasks); 2131 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2132 2133 /* Other bits still set at this level, so done. */ 2134 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2135 return; 2136 } 2137 rnp->completedqs = rnp->gp_seq; 2138 mask = rnp->grpmask; 2139 if (rnp->parent == NULL) { 2140 2141 /* No more levels. Exit loop holding root lock. 
*/ 2142 2143 break; 2144 } 2145 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2146 rnp_c = rnp; 2147 rnp = rnp->parent; 2148 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2149 oldmask = READ_ONCE(rnp_c->qsmask); 2150 } 2151 2152 /* 2153 * Get here if we are the last CPU to pass through a quiescent 2154 * state for this grace period. Invoke rcu_report_qs_rsp() 2155 * to clean up and start the next grace period if one is needed. 2156 */ 2157 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 2158 } 2159 2160 /* 2161 * Record a quiescent state for all tasks that were previously queued 2162 * on the specified rcu_node structure and that were blocking the current 2163 * RCU grace period. The caller must hold the corresponding rnp->lock with 2164 * irqs disabled, and this lock is released upon return, but irqs remain 2165 * disabled. 2166 */ 2167 static void __maybe_unused 2168 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 2169 __releases(rnp->lock) 2170 { 2171 unsigned long gps; 2172 unsigned long mask; 2173 struct rcu_node *rnp_p; 2174 2175 raw_lockdep_assert_held_rcu_node(rnp); 2176 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 2177 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 2178 rnp->qsmask != 0) { 2179 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2180 return; /* Still need more quiescent states! */ 2181 } 2182 2183 rnp->completedqs = rnp->gp_seq; 2184 rnp_p = rnp->parent; 2185 if (rnp_p == NULL) { 2186 /* 2187 * Only one rcu_node structure in the tree, so don't 2188 * try to report up to its nonexistent parent! 2189 */ 2190 rcu_report_qs_rsp(flags); 2191 return; 2192 } 2193 2194 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 2195 gps = rnp->gp_seq; 2196 mask = rnp->grpmask; 2197 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2198 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2199 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 2200 } 2201 2202 /* 2203 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2204 * structure. This must be called from the specified CPU. 2205 */ 2206 static void 2207 rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) 2208 { 2209 unsigned long flags; 2210 unsigned long mask; 2211 bool needwake = false; 2212 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2213 rcu_segcblist_is_offloaded(&rdp->cblist); 2214 struct rcu_node *rnp; 2215 2216 rnp = rdp->mynode; 2217 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2218 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2219 rdp->gpwrap) { 2220 2221 /* 2222 * The grace period in which this quiescent state was 2223 * recorded has ended, so don't report it upwards. 2224 * We will instead need a new quiescent state that lies 2225 * within the current grace period. 2226 */ 2227 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2228 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2229 return; 2230 } 2231 mask = rdp->grpmask; 2232 if (rdp->cpu == smp_processor_id()) 2233 rdp->core_needs_qs = false; 2234 if ((rnp->qsmask & mask) == 0) { 2235 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2236 } else { 2237 /* 2238 * This GP can't end until cpu checks in, so all of our 2239 * callbacks can be processed during the next GP. 
2240 */ 2241 if (!offloaded) 2242 needwake = rcu_accelerate_cbs(rnp, rdp); 2243 2244 rcu_disable_urgency_upon_qs(rdp); 2245 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2246 /* ^^^ Released rnp->lock */ 2247 if (needwake) 2248 rcu_gp_kthread_wake(); 2249 } 2250 } 2251 2252 /* 2253 * Check to see if there is a new grace period of which this CPU 2254 * is not yet aware, and if so, set up local rcu_data state for it. 2255 * Otherwise, see if this CPU has just passed through its first 2256 * quiescent state for this grace period, and record that fact if so. 2257 */ 2258 static void 2259 rcu_check_quiescent_state(struct rcu_data *rdp) 2260 { 2261 /* Check for grace-period ends and beginnings. */ 2262 note_gp_changes(rdp); 2263 2264 /* 2265 * Does this CPU still need to do its part for current grace period? 2266 * If no, return and let the other CPUs do their part as well. 2267 */ 2268 if (!rdp->core_needs_qs) 2269 return; 2270 2271 /* 2272 * Was there a quiescent state since the beginning of the grace 2273 * period? If no, then exit and wait for the next call. 2274 */ 2275 if (rdp->cpu_no_qs.b.norm) 2276 return; 2277 2278 /* 2279 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2280 * judge of that). 2281 */ 2282 rcu_report_qs_rdp(rdp->cpu, rdp); 2283 } 2284 2285 /* 2286 * Near the end of the offline process. Trace the fact that this CPU 2287 * is going offline. 2288 */ 2289 int rcutree_dying_cpu(unsigned int cpu) 2290 { 2291 bool blkd; 2292 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 2293 struct rcu_node *rnp = rdp->mynode; 2294 2295 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2296 return 0; 2297 2298 blkd = !!(rnp->qsmask & rdp->grpmask); 2299 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 2300 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 2301 return 0; 2302 } 2303 2304 /* 2305 * All CPUs for the specified rcu_node structure have gone offline, 2306 * and all tasks that were preempted within an RCU read-side critical 2307 * section while running on one of those CPUs have since exited their RCU 2308 * read-side critical section. Some other CPU is reporting this fact with 2309 * the specified rcu_node structure's ->lock held and interrupts disabled. 2310 * This function therefore goes up the tree of rcu_node structures, 2311 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2312 * the leaf rcu_node structure's ->qsmaskinit field has already been 2313 * updated. 2314 * 2315 * This function does check that the specified rcu_node structure has 2316 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2317 * prematurely. That said, invoking it after the fact will cost you 2318 * a needless lock acquisition. So once it has done its work, don't 2319 * invoke it again. 2320 */ 2321 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 2322 { 2323 long mask; 2324 struct rcu_node *rnp = rnp_leaf; 2325 2326 raw_lockdep_assert_held_rcu_node(rnp_leaf); 2327 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2328 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 2329 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 2330 return; 2331 for (;;) { 2332 mask = rnp->grpmask; 2333 rnp = rnp->parent; 2334 if (!rnp) 2335 break; 2336 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2337 rnp->qsmaskinit &= ~mask; 2338 /* Between grace periods, so better already be zero! */ 2339 WARN_ON_ONCE(rnp->qsmask); 2340 if (rnp->qsmaskinit) { 2341 raw_spin_unlock_rcu_node(rnp); 2342 /* irqs remain disabled. 
*/ 2343 return; 2344 } 2345 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2346 } 2347 } 2348 2349 /* 2350 * The CPU has been completely removed, and some other CPU is reporting 2351 * this fact from process context. Do the remainder of the cleanup. 2352 * There can only be one CPU hotplug operation at a time, so no need for 2353 * explicit locking. 2354 */ 2355 int rcutree_dead_cpu(unsigned int cpu) 2356 { 2357 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 2358 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2359 2360 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2361 return 0; 2362 2363 /* Adjust any no-longer-needed kthreads. */ 2364 rcu_boost_kthread_setaffinity(rnp, -1); 2365 /* Do any needed no-CB deferred wakeups from this CPU. */ 2366 do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu)); 2367 2368 // Stop-machine done, so allow nohz_full to disable tick. 2369 tick_dep_clear(TICK_DEP_BIT_RCU); 2370 return 0; 2371 } 2372 2373 /* 2374 * Invoke any RCU callbacks that have made it to the end of their grace 2375 * period. Thottle as specified by rdp->blimit. 2376 */ 2377 static void rcu_do_batch(struct rcu_data *rdp) 2378 { 2379 unsigned long flags; 2380 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2381 rcu_segcblist_is_offloaded(&rdp->cblist); 2382 struct rcu_head *rhp; 2383 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2384 long bl, count; 2385 long pending, tlimit = 0; 2386 2387 /* If no callbacks are ready, just return. */ 2388 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2389 trace_rcu_batch_start(rcu_state.name, 2390 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2391 trace_rcu_batch_end(rcu_state.name, 0, 2392 !rcu_segcblist_empty(&rdp->cblist), 2393 need_resched(), is_idle_task(current), 2394 rcu_is_callbacks_kthread()); 2395 return; 2396 } 2397 2398 /* 2399 * Extract the list of ready callbacks, disabling to prevent 2400 * races with call_rcu() from interrupt handlers. Leave the 2401 * callback counts, as rcu_barrier() needs to be conservative. 2402 */ 2403 local_irq_save(flags); 2404 rcu_nocb_lock(rdp); 2405 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2406 pending = rcu_segcblist_n_cbs(&rdp->cblist); 2407 bl = max(rdp->blimit, pending >> rcu_divisor); 2408 if (unlikely(bl > 100)) 2409 tlimit = local_clock() + rcu_resched_ns; 2410 trace_rcu_batch_start(rcu_state.name, 2411 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2412 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2413 if (offloaded) 2414 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2415 rcu_nocb_unlock_irqrestore(rdp, flags); 2416 2417 /* Invoke callbacks. */ 2418 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2419 rhp = rcu_cblist_dequeue(&rcl); 2420 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2421 rcu_callback_t f; 2422 2423 debug_rcu_head_unqueue(rhp); 2424 2425 rcu_lock_acquire(&rcu_callback_map); 2426 trace_rcu_invoke_callback(rcu_state.name, rhp); 2427 2428 f = rhp->func; 2429 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); 2430 f(rhp); 2431 2432 rcu_lock_release(&rcu_callback_map); 2433 2434 /* 2435 * Stop only if limit reached and CPU has something to do. 2436 * Note: The rcl structure counts down from zero. 2437 */ 2438 if (-rcl.len >= bl && !offloaded && 2439 (need_resched() || 2440 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2441 break; 2442 if (unlikely(tlimit)) { 2443 /* only call local_clock() every 32 callbacks */ 2444 if (likely((-rcl.len & 31) || local_clock() < tlimit)) 2445 continue; 2446 /* Exceeded the time limit, so leave. 
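Callbacks not yet invoked are requeued below, and RCU core processing is re-invoked if ready callbacks remain on a non-offloaded CPU.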
*/ 2447 break; 2448 } 2449 if (offloaded) { 2450 WARN_ON_ONCE(in_serving_softirq()); 2451 local_bh_enable(); 2452 lockdep_assert_irqs_enabled(); 2453 cond_resched_tasks_rcu_qs(); 2454 lockdep_assert_irqs_enabled(); 2455 local_bh_disable(); 2456 } 2457 } 2458 2459 local_irq_save(flags); 2460 rcu_nocb_lock(rdp); 2461 count = -rcl.len; 2462 rdp->n_cbs_invoked += count; 2463 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2464 is_idle_task(current), rcu_is_callbacks_kthread()); 2465 2466 /* Update counts and requeue any remaining callbacks. */ 2467 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2468 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2469 rcu_segcblist_insert_count(&rdp->cblist, &rcl); 2470 2471 /* Reinstate batch limit if we have worked down the excess. */ 2472 count = rcu_segcblist_n_cbs(&rdp->cblist); 2473 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2474 rdp->blimit = blimit; 2475 2476 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2477 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2478 rdp->qlen_last_fqs_check = 0; 2479 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2480 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2481 rdp->qlen_last_fqs_check = count; 2482 2483 /* 2484 * The following usually indicates a double call_rcu(). To track 2485 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2486 */ 2487 WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist)); 2488 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2489 count != 0 && rcu_segcblist_empty(&rdp->cblist)); 2490 2491 rcu_nocb_unlock_irqrestore(rdp, flags); 2492 2493 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2494 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) 2495 invoke_rcu_core(); 2496 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2497 } 2498 2499 /* 2500 * This function is invoked from each scheduling-clock interrupt, 2501 * and checks to see if this CPU is in a non-context-switch quiescent 2502 * state, for example, user mode or idle loop. It also schedules RCU 2503 * core processing. If the current grace period has gone on too long, 2504 * it will ask the scheduler to manufacture a context switch for the sole 2505 * purpose of providing a providing the needed quiescent state. 2506 */ 2507 void rcu_sched_clock_irq(int user) 2508 { 2509 trace_rcu_utilization(TPS("Start scheduler-tick")); 2510 raw_cpu_inc(rcu_data.ticks_this_gp); 2511 /* The load-acquire pairs with the store-release setting to true. */ 2512 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2513 /* Idle and userspace execution already are quiescent states. */ 2514 if (!rcu_is_cpu_rrupt_from_idle() && !user) { 2515 set_tsk_need_resched(current); 2516 set_preempt_need_resched(); 2517 } 2518 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2519 } 2520 rcu_flavor_sched_clock_irq(user); 2521 if (rcu_pending(user)) 2522 invoke_rcu_core(); 2523 2524 trace_rcu_utilization(TPS("End scheduler-tick")); 2525 } 2526 2527 /* 2528 * Scan the leaf rcu_node structures. For each structure on which all 2529 * CPUs have reported a quiescent state and on which there are tasks 2530 * blocking the current grace period, initiate RCU priority boosting. 2531 * Otherwise, invoke the specified function to check dyntick state for 2532 * each CPU that has not yet reported a quiescent state. 
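 * The function passed in is dyntick_save_progress_counter() on a given grace
 * period's first scan and rcu_implicit_dynticks_qs() on subsequent scans;
 * see rcu_gp_fqs() above.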
2533 */ 2534 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2535 { 2536 int cpu; 2537 unsigned long flags; 2538 unsigned long mask; 2539 struct rcu_data *rdp; 2540 struct rcu_node *rnp; 2541 2542 rcu_state.cbovld = rcu_state.cbovldnext; 2543 rcu_state.cbovldnext = false; 2544 rcu_for_each_leaf_node(rnp) { 2545 cond_resched_tasks_rcu_qs(); 2546 mask = 0; 2547 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2548 rcu_state.cbovldnext |= !!rnp->cbovldmask; 2549 if (rnp->qsmask == 0) { 2550 if (!IS_ENABLED(CONFIG_PREEMPT_RCU) || 2551 rcu_preempt_blocked_readers_cgp(rnp)) { 2552 /* 2553 * No point in scanning bits because they 2554 * are all zero. But we might need to 2555 * priority-boost blocked readers. 2556 */ 2557 rcu_initiate_boost(rnp, flags); 2558 /* rcu_initiate_boost() releases rnp->lock */ 2559 continue; 2560 } 2561 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2562 continue; 2563 } 2564 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { 2565 rdp = per_cpu_ptr(&rcu_data, cpu); 2566 if (f(rdp)) { 2567 mask |= rdp->grpmask; 2568 rcu_disable_urgency_upon_qs(rdp); 2569 } 2570 } 2571 if (mask != 0) { 2572 /* Idle/offline CPUs, report (releases rnp->lock). */ 2573 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2574 } else { 2575 /* Nothing to do here, so just drop the lock. */ 2576 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2577 } 2578 } 2579 } 2580 2581 /* 2582 * Force quiescent states on reluctant CPUs, and also detect which 2583 * CPUs are in dyntick-idle mode. 2584 */ 2585 void rcu_force_quiescent_state(void) 2586 { 2587 unsigned long flags; 2588 bool ret; 2589 struct rcu_node *rnp; 2590 struct rcu_node *rnp_old = NULL; 2591 2592 /* Funnel through hierarchy to reduce memory contention. */ 2593 rnp = __this_cpu_read(rcu_data.mynode); 2594 for (; rnp != NULL; rnp = rnp->parent) { 2595 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2596 !raw_spin_trylock(&rnp->fqslock); 2597 if (rnp_old != NULL) 2598 raw_spin_unlock(&rnp_old->fqslock); 2599 if (ret) 2600 return; 2601 rnp_old = rnp; 2602 } 2603 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2604 2605 /* Reached the root of the rcu_node tree, acquire lock. */ 2606 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2607 raw_spin_unlock(&rnp_old->fqslock); 2608 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2609 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2610 return; /* Someone beat us to it. */ 2611 } 2612 WRITE_ONCE(rcu_state.gp_flags, 2613 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); 2614 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2615 rcu_gp_kthread_wake(); 2616 } 2617 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2618 2619 /* Perform RCU core processing work for the current CPU. */ 2620 static __latent_entropy void rcu_core(void) 2621 { 2622 unsigned long flags; 2623 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2624 struct rcu_node *rnp = rdp->mynode; 2625 const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2626 rcu_segcblist_is_offloaded(&rdp->cblist); 2627 2628 if (cpu_is_offline(smp_processor_id())) 2629 return; 2630 trace_rcu_utilization(TPS("Start RCU core")); 2631 WARN_ON_ONCE(!rdp->beenonline); 2632 2633 /* Report any deferred quiescent states if preemption enabled. */ 2634 if (!(preempt_count() & PREEMPT_MASK)) { 2635 rcu_preempt_deferred_qs(current); 2636 } else if (rcu_preempt_need_deferred_qs(current)) { 2637 set_tsk_need_resched(current); 2638 set_preempt_need_resched(); 2639 } 2640 2641 /* Update RCU state based on any recent quiescent states. 
*/ 2642 rcu_check_quiescent_state(rdp); 2643 2644 /* No grace period and unregistered callbacks? */ 2645 if (!rcu_gp_in_progress() && 2646 rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { 2647 local_irq_save(flags); 2648 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2649 rcu_accelerate_cbs_unlocked(rnp, rdp); 2650 local_irq_restore(flags); 2651 } 2652 2653 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2654 2655 /* If there are callbacks ready, invoke them. */ 2656 if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && 2657 likely(READ_ONCE(rcu_scheduler_fully_active))) 2658 rcu_do_batch(rdp); 2659 2660 /* Do any needed deferred wakeups of rcuo kthreads. */ 2661 do_nocb_deferred_wakeup(rdp); 2662 trace_rcu_utilization(TPS("End RCU core")); 2663 } 2664 2665 static void rcu_core_si(struct softirq_action *h) 2666 { 2667 rcu_core(); 2668 } 2669 2670 static void rcu_wake_cond(struct task_struct *t, int status) 2671 { 2672 /* 2673 * If the thread is yielding, only wake it when this 2674 * is invoked from idle 2675 */ 2676 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2677 wake_up_process(t); 2678 } 2679 2680 static void invoke_rcu_core_kthread(void) 2681 { 2682 struct task_struct *t; 2683 unsigned long flags; 2684 2685 local_irq_save(flags); 2686 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2687 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2688 if (t != NULL && t != current) 2689 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2690 local_irq_restore(flags); 2691 } 2692 2693 /* 2694 * Wake up this CPU's rcuc kthread to do RCU core processing. 2695 */ 2696 static void invoke_rcu_core(void) 2697 { 2698 if (!cpu_online(smp_processor_id())) 2699 return; 2700 if (use_softirq) 2701 raise_softirq(RCU_SOFTIRQ); 2702 else 2703 invoke_rcu_core_kthread(); 2704 } 2705 2706 static void rcu_cpu_kthread_park(unsigned int cpu) 2707 { 2708 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2709 } 2710 2711 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2712 { 2713 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2714 } 2715 2716 /* 2717 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2718 * the RCU softirq used in configurations of RCU that do not support RCU 2719 * priority boosting. 
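 * Each invocation polls for work up to ten times with bottom halves disabled;
 * if work keeps arriving, it yields briefly (RCU_KTHREAD_YIELDING) before
 * being invoked again.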
2720 */ 2721 static void rcu_cpu_kthread(unsigned int cpu) 2722 { 2723 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2724 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2725 int spincnt; 2726 2727 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2728 for (spincnt = 0; spincnt < 10; spincnt++) { 2729 local_bh_disable(); 2730 *statusp = RCU_KTHREAD_RUNNING; 2731 local_irq_disable(); 2732 work = *workp; 2733 *workp = 0; 2734 local_irq_enable(); 2735 if (work) 2736 rcu_core(); 2737 local_bh_enable(); 2738 if (*workp == 0) { 2739 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2740 *statusp = RCU_KTHREAD_WAITING; 2741 return; 2742 } 2743 } 2744 *statusp = RCU_KTHREAD_YIELDING; 2745 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2746 schedule_timeout_idle(2); 2747 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2748 *statusp = RCU_KTHREAD_WAITING; 2749 } 2750 2751 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2752 .store = &rcu_data.rcu_cpu_kthread_task, 2753 .thread_should_run = rcu_cpu_kthread_should_run, 2754 .thread_fn = rcu_cpu_kthread, 2755 .thread_comm = "rcuc/%u", 2756 .setup = rcu_cpu_kthread_setup, 2757 .park = rcu_cpu_kthread_park, 2758 }; 2759 2760 /* 2761 * Spawn per-CPU RCU core processing kthreads. 2762 */ 2763 static int __init rcu_spawn_core_kthreads(void) 2764 { 2765 int cpu; 2766 2767 for_each_possible_cpu(cpu) 2768 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2769 if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq) 2770 return 0; 2771 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2772 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2773 return 0; 2774 } 2775 early_initcall(rcu_spawn_core_kthreads); 2776 2777 /* 2778 * Handle any core-RCU processing required by a call_rcu() invocation. 2779 */ 2780 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 2781 unsigned long flags) 2782 { 2783 /* 2784 * If called from an extended quiescent state, invoke the RCU 2785 * core in order to force a re-evaluation of RCU's idleness. 2786 */ 2787 if (!rcu_is_watching()) 2788 invoke_rcu_core(); 2789 2790 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2791 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2792 return; 2793 2794 /* 2795 * Force the grace period if too many callbacks or too long waiting. 2796 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 2797 * if some other CPU has recently done so. Also, don't bother 2798 * invoking rcu_force_quiescent_state() if the newly enqueued callback 2799 * is the only one waiting for a grace period to complete. 2800 */ 2801 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 2802 rdp->qlen_last_fqs_check + qhimark)) { 2803 2804 /* Are we ignoring a completed grace period? */ 2805 note_gp_changes(rdp); 2806 2807 /* Start a new grace period if one not already started. */ 2808 if (!rcu_gp_in_progress()) { 2809 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 2810 } else { 2811 /* Give the grace period a kick. */ 2812 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 2813 if (rcu_state.n_force_qs == rdp->n_force_qs_snap && 2814 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 2815 rcu_force_quiescent_state(); 2816 rdp->n_force_qs_snap = rcu_state.n_force_qs; 2817 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2818 } 2819 } 2820 } 2821 2822 /* 2823 * RCU callback function to leak a callback. 
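 * It is installed in place of the real callback when debug_rcu_head_queue()
 * detects a probable double call_rcu(), so that the memory is deliberately
 * leaked rather than freed twice; see __call_rcu() below.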
2824 */ 2825 static void rcu_leak_callback(struct rcu_head *rhp) 2826 { 2827 } 2828 2829 /* 2830 * Check and if necessary update the leaf rcu_node structure's 2831 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2832 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 2833 * structure's ->lock. 2834 */ 2835 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 2836 { 2837 raw_lockdep_assert_held_rcu_node(rnp); 2838 if (qovld_calc <= 0) 2839 return; // Early boot and wildcard value set. 2840 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 2841 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 2842 else 2843 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 2844 } 2845 2846 /* 2847 * Check and if necessary update the leaf rcu_node structure's 2848 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 2849 * number of queued RCU callbacks. No locks need be held, but the 2850 * caller must have disabled interrupts. 2851 * 2852 * Note that this function ignores the possibility that there are a lot 2853 * of callbacks all of which have already seen the end of their respective 2854 * grace periods. This omission is due to the need for no-CBs CPUs to 2855 * be holding ->nocb_lock to do this check, which is too heavy for a 2856 * common-case operation. 2857 */ 2858 static void check_cb_ovld(struct rcu_data *rdp) 2859 { 2860 struct rcu_node *const rnp = rdp->mynode; 2861 2862 if (qovld_calc <= 0 || 2863 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 2864 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 2865 return; // Early boot wildcard value or already set correctly. 2866 raw_spin_lock_rcu_node(rnp); 2867 check_cb_ovld_locked(rdp, rnp); 2868 raw_spin_unlock_rcu_node(rnp); 2869 } 2870 2871 /* Helper function for call_rcu() and friends. */ 2872 static void 2873 __call_rcu(struct rcu_head *head, rcu_callback_t func) 2874 { 2875 unsigned long flags; 2876 struct rcu_data *rdp; 2877 bool was_alldone; 2878 2879 /* Misaligned rcu_head! */ 2880 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 2881 2882 if (debug_rcu_head_queue(head)) { 2883 /* 2884 * Probable double call_rcu(), so leak the callback. 2885 * Use rcu:rcu_callback trace event to find the previous 2886 * time callback was passed to __call_rcu(). 2887 */ 2888 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", 2889 head, head->func); 2890 WRITE_ONCE(head->func, rcu_leak_callback); 2891 return; 2892 } 2893 head->func = func; 2894 head->next = NULL; 2895 local_irq_save(flags); 2896 kasan_record_aux_stack(head); 2897 rdp = this_cpu_ptr(&rcu_data); 2898 2899 /* Add the callback to our list. */ 2900 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 2901 // This can trigger due to call_rcu() from offline CPU: 2902 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 2903 WARN_ON_ONCE(!rcu_is_watching()); 2904 // Very early boot, before rcu_init(). Initialize if needed 2905 // and then drop through to queue the callback. 2906 if (rcu_segcblist_empty(&rdp->cblist)) 2907 rcu_segcblist_init(&rdp->cblist); 2908 } 2909 2910 check_cb_ovld(rdp); 2911 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) 2912 return; // Enqueued onto ->nocb_bypass, so just leave. 2913 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. 
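// Queue the callback on this CPU's segmented callback list; interrupts are
// disabled here, and for offloaded CPUs ->nocb_lock is held (see above).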
2914 rcu_segcblist_enqueue(&rdp->cblist, head); 2915 if (__is_kvfree_rcu_offset((unsigned long)func)) 2916 trace_rcu_kvfree_callback(rcu_state.name, head, 2917 (unsigned long)func, 2918 rcu_segcblist_n_cbs(&rdp->cblist)); 2919 else 2920 trace_rcu_callback(rcu_state.name, head, 2921 rcu_segcblist_n_cbs(&rdp->cblist)); 2922 2923 /* Go handle any RCU core processing required. */ 2924 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2925 unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { 2926 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ 2927 } else { 2928 __call_rcu_core(rdp, head, flags); 2929 local_irq_restore(flags); 2930 } 2931 } 2932 2933 /** 2934 * call_rcu() - Queue an RCU callback for invocation after a grace period. 2935 * @head: structure to be used for queueing the RCU updates. 2936 * @func: actual callback function to be invoked after the grace period 2937 * 2938 * The callback function will be invoked some time after a full grace 2939 * period elapses, in other words after all pre-existing RCU read-side 2940 * critical sections have completed. However, the callback function 2941 * might well execute concurrently with RCU read-side critical sections 2942 * that started after call_rcu() was invoked. RCU read-side critical 2943 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and 2944 * may be nested. In addition, regions of code across which interrupts, 2945 * preemption, or softirqs have been disabled also serve as RCU read-side 2946 * critical sections. This includes hardware interrupt handlers, softirq 2947 * handlers, and NMI handlers. 2948 * 2949 * Note that all CPUs must agree that the grace period extended beyond 2950 * all pre-existing RCU read-side critical section. On systems with more 2951 * than one CPU, this means that when "func()" is invoked, each CPU is 2952 * guaranteed to have executed a full memory barrier since the end of its 2953 * last RCU read-side critical section whose beginning preceded the call 2954 * to call_rcu(). It also means that each CPU executing an RCU read-side 2955 * critical section that continues beyond the start of "func()" must have 2956 * executed a memory barrier after the call_rcu() but before the beginning 2957 * of that RCU read-side critical section. Note that these guarantees 2958 * include CPUs that are offline, idle, or executing in user mode, as 2959 * well as CPUs that are executing in the kernel. 2960 * 2961 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 2962 * resulting RCU callback function "func()", then both CPU A and CPU B are 2963 * guaranteed to execute a full memory barrier during the time interval 2964 * between the call to call_rcu() and the invocation of "func()" -- even 2965 * if CPU A and CPU B are the same CPU (but again only if the system has 2966 * more than one CPU). 2967 */ 2968 void call_rcu(struct rcu_head *head, rcu_callback_t func) 2969 { 2970 __call_rcu(head, func); 2971 } 2972 EXPORT_SYMBOL_GPL(call_rcu); 2973 2974 2975 /* Maximum number of jiffies to wait before draining a batch. 
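For the usual HZ choices, HZ / 50 works out to roughly 20 milliseconds.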
*/ 2976 #define KFREE_DRAIN_JIFFIES (HZ / 50) 2977 #define KFREE_N_BATCHES 2 2978 #define FREE_N_CHANNELS 2 2979 2980 /** 2981 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers 2982 * @nr_records: Number of active pointers in the array 2983 * @next: Next bulk object in the block chain 2984 * @records: Array of the kvfree_rcu() pointers 2985 */ 2986 struct kvfree_rcu_bulk_data { 2987 unsigned long nr_records; 2988 struct kvfree_rcu_bulk_data *next; 2989 void *records[]; 2990 }; 2991 2992 /* 2993 * This macro defines how many entries the "records" array 2994 * will contain. It is chosen so that a kvfree_rcu_bulk_data 2995 * structure fills exactly one page. 2996 */ 2997 #define KVFREE_BULK_MAX_ENTR \ 2998 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *)) 2999 3000 /** 3001 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests 3002 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period 3003 * @head_free: List of kfree_rcu() objects waiting for a grace period 3004 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period 3005 * @krcp: Pointer to @kfree_rcu_cpu structure 3006 */ 3007 3008 struct kfree_rcu_cpu_work { 3009 struct rcu_work rcu_work; 3010 struct rcu_head *head_free; 3011 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS]; 3012 struct kfree_rcu_cpu *krcp; 3013 }; 3014 3015 /** 3016 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period 3017 * @head: List of kfree_rcu() objects not yet waiting for a grace period 3018 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period 3019 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period 3020 * @lock: Synchronize access to this structure 3021 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES 3022 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending 3023 * @initialized: The @rcu_work fields have been initialized 3024 * @count: Number of objects for which GP not started 3025 * 3026 * This is a per-CPU structure. The reason that it is not included in 3027 * the rcu_data structure is to permit this code to be extracted from 3028 * the RCU files. Such extraction could allow further optimization of 3029 * the interactions with the slab allocators. 3030 */ 3031 struct kfree_rcu_cpu { 3032 struct rcu_head *head; 3033 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS]; 3034 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES]; 3035 raw_spinlock_t lock; 3036 struct delayed_work monitor_work; 3037 bool monitor_todo; 3038 bool initialized; 3039 int count; 3040 3041 /* 3042 * A simple cache list that contains objects for 3043 * reuse. To save some per-cpu space the list is 3044 * singly linked (an llist). Even though the llist itself is 3045 * lockless, access to it has to be protected by the 3046 * per-cpu lock.
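 * Pages are returned to this cache by kfree_rcu_work() via put_cached_bnode(),
 * up to rcu_min_cached_objs of them, and are handed out again by
 * get_cached_bnode().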
3047 */ 3048 struct llist_head bkvcache; 3049 int nr_bkv_objs; 3050 }; 3051 3052 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = { 3053 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock), 3054 }; 3055 3056 static __always_inline void 3057 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead) 3058 { 3059 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3060 int i; 3061 3062 for (i = 0; i < bhead->nr_records; i++) 3063 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i])); 3064 #endif 3065 } 3066 3067 static inline struct kfree_rcu_cpu * 3068 krc_this_cpu_lock(unsigned long *flags) 3069 { 3070 struct kfree_rcu_cpu *krcp; 3071 3072 local_irq_save(*flags); // For safely calling this_cpu_ptr(). 3073 krcp = this_cpu_ptr(&krc); 3074 raw_spin_lock(&krcp->lock); 3075 3076 return krcp; 3077 } 3078 3079 static inline void 3080 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) 3081 { 3082 raw_spin_unlock(&krcp->lock); 3083 local_irq_restore(flags); 3084 } 3085 3086 static inline struct kvfree_rcu_bulk_data * 3087 get_cached_bnode(struct kfree_rcu_cpu *krcp) 3088 { 3089 if (!krcp->nr_bkv_objs) 3090 return NULL; 3091 3092 krcp->nr_bkv_objs--; 3093 return (struct kvfree_rcu_bulk_data *) 3094 llist_del_first(&krcp->bkvcache); 3095 } 3096 3097 static inline bool 3098 put_cached_bnode(struct kfree_rcu_cpu *krcp, 3099 struct kvfree_rcu_bulk_data *bnode) 3100 { 3101 // Check the limit. 3102 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) 3103 return false; 3104 3105 llist_add((struct llist_node *) bnode, &krcp->bkvcache); 3106 krcp->nr_bkv_objs++; 3107 return true; 3108 3109 } 3110 3111 /* 3112 * This function is invoked in workqueue context after a grace period. 3113 * It frees all the objects queued on ->bhead_free or ->head_free. 3114 */ 3115 static void kfree_rcu_work(struct work_struct *work) 3116 { 3117 unsigned long flags; 3118 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext; 3119 struct rcu_head *head, *next; 3120 struct kfree_rcu_cpu *krcp; 3121 struct kfree_rcu_cpu_work *krwp; 3122 int i, j; 3123 3124 krwp = container_of(to_rcu_work(work), 3125 struct kfree_rcu_cpu_work, rcu_work); 3126 krcp = krwp->krcp; 3127 3128 raw_spin_lock_irqsave(&krcp->lock, flags); 3129 // Channels 1 and 2. 3130 for (i = 0; i < FREE_N_CHANNELS; i++) { 3131 bkvhead[i] = krwp->bkvhead_free[i]; 3132 krwp->bkvhead_free[i] = NULL; 3133 } 3134 3135 // Channel 3. 3136 head = krwp->head_free; 3137 krwp->head_free = NULL; 3138 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3139 3140 // Handle two first channels. 3141 for (i = 0; i < FREE_N_CHANNELS; i++) { 3142 for (; bkvhead[i]; bkvhead[i] = bnext) { 3143 bnext = bkvhead[i]->next; 3144 debug_rcu_bhead_unqueue(bkvhead[i]); 3145 3146 rcu_lock_acquire(&rcu_callback_map); 3147 if (i == 0) { // kmalloc() / kfree(). 3148 trace_rcu_invoke_kfree_bulk_callback( 3149 rcu_state.name, bkvhead[i]->nr_records, 3150 bkvhead[i]->records); 3151 3152 kfree_bulk(bkvhead[i]->nr_records, 3153 bkvhead[i]->records); 3154 } else { // vmalloc() / vfree(). 
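// There is no bulk-free interface for vmalloc() memory, so each record is passed to vfree() individually.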
3155 for (j = 0; j < bkvhead[i]->nr_records; j++) { 3156 trace_rcu_invoke_kvfree_callback( 3157 rcu_state.name, 3158 bkvhead[i]->records[j], 0); 3159 3160 vfree(bkvhead[i]->records[j]); 3161 } 3162 } 3163 rcu_lock_release(&rcu_callback_map); 3164 3165 krcp = krc_this_cpu_lock(&flags); 3166 if (put_cached_bnode(krcp, bkvhead[i])) 3167 bkvhead[i] = NULL; 3168 krc_this_cpu_unlock(krcp, flags); 3169 3170 if (bkvhead[i]) 3171 free_page((unsigned long) bkvhead[i]); 3172 3173 cond_resched_tasks_rcu_qs(); 3174 } 3175 } 3176 3177 /* 3178 * Emergency case only. It can happen under low memory 3179 * condition when an allocation gets failed, so the "bulk" 3180 * path can not be temporary maintained. 3181 */ 3182 for (; head; head = next) { 3183 unsigned long offset = (unsigned long)head->func; 3184 void *ptr = (void *)head - offset; 3185 3186 next = head->next; 3187 debug_rcu_head_unqueue((struct rcu_head *)ptr); 3188 rcu_lock_acquire(&rcu_callback_map); 3189 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); 3190 3191 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) 3192 kvfree(ptr); 3193 3194 rcu_lock_release(&rcu_callback_map); 3195 cond_resched_tasks_rcu_qs(); 3196 } 3197 } 3198 3199 /* 3200 * Schedule the kfree batch RCU work to run in workqueue context after a GP. 3201 * 3202 * This function is invoked by kfree_rcu_monitor() when the KFREE_DRAIN_JIFFIES 3203 * timeout has been reached. 3204 */ 3205 static inline bool queue_kfree_rcu_work(struct kfree_rcu_cpu *krcp) 3206 { 3207 struct kfree_rcu_cpu_work *krwp; 3208 bool repeat = false; 3209 int i, j; 3210 3211 lockdep_assert_held(&krcp->lock); 3212 3213 for (i = 0; i < KFREE_N_BATCHES; i++) { 3214 krwp = &(krcp->krw_arr[i]); 3215 3216 /* 3217 * Try to detach bkvhead or head and attach it over any 3218 * available corresponding free channel. It can be that 3219 * a previous RCU batch is in progress, it means that 3220 * immediately to queue another one is not possible so 3221 * return false to tell caller to retry. 3222 */ 3223 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) || 3224 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) || 3225 (krcp->head && !krwp->head_free)) { 3226 // Channel 1 corresponds to SLAB ptrs. 3227 // Channel 2 corresponds to vmalloc ptrs. 3228 for (j = 0; j < FREE_N_CHANNELS; j++) { 3229 if (!krwp->bkvhead_free[j]) { 3230 krwp->bkvhead_free[j] = krcp->bkvhead[j]; 3231 krcp->bkvhead[j] = NULL; 3232 } 3233 } 3234 3235 // Channel 3 corresponds to emergency path. 3236 if (!krwp->head_free) { 3237 krwp->head_free = krcp->head; 3238 krcp->head = NULL; 3239 } 3240 3241 WRITE_ONCE(krcp->count, 0); 3242 3243 /* 3244 * One work is per one batch, so there are three 3245 * "free channels", the batch can handle. It can 3246 * be that the work is in the pending state when 3247 * channels have been detached following by each 3248 * other. 3249 */ 3250 queue_rcu_work(system_wq, &krwp->rcu_work); 3251 } 3252 3253 // Repeat if any "free" corresponding channel is still busy. 3254 if (krcp->bkvhead[0] || krcp->bkvhead[1] || krcp->head) 3255 repeat = true; 3256 } 3257 3258 return !repeat; 3259 } 3260 3261 static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp, 3262 unsigned long flags) 3263 { 3264 // Attempt to start a new batch. 3265 krcp->monitor_todo = false; 3266 if (queue_kfree_rcu_work(krcp)) { 3267 // Success! Our job is done here. 3268 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3269 return; 3270 } 3271 3272 // Previous RCU batch still in progress, try again later. 
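// Re-set ->monitor_todo and re-arm the delayed work so that the drain is retried after another KFREE_DRAIN_JIFFIES.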
3273 krcp->monitor_todo = true;
3274 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3275 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3276 }
3277
3278 /*
3279 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3280 * It invokes kfree_rcu_drain_unlock() to attempt to start another batch.
3281 */
3282 static void kfree_rcu_monitor(struct work_struct *work)
3283 {
3284 unsigned long flags;
3285 struct kfree_rcu_cpu *krcp = container_of(work, struct kfree_rcu_cpu,
3286 monitor_work.work);
3287
3288 raw_spin_lock_irqsave(&krcp->lock, flags);
3289 if (krcp->monitor_todo)
3290 kfree_rcu_drain_unlock(krcp, flags);
3291 else
3292 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3293 }
3294
3295 static inline bool
3296 kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
3297 {
3298 struct kvfree_rcu_bulk_data *bnode;
3299 int idx;
3300
3301 if (unlikely(!krcp->initialized))
3302 return false;
3303
3304 lockdep_assert_held(&krcp->lock);
3305 idx = !!is_vmalloc_addr(ptr);
3306
3307 /* Check if a new block is required. */
3308 if (!krcp->bkvhead[idx] ||
3309 krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3310 bnode = get_cached_bnode(krcp);
3311 if (!bnode) {
3312 /*
3313 * To keep this path working on raw non-preemptible
3314 * sections, prevent the optional entry into the
3315 * allocator as it uses sleeping locks. In fact, even
3316 * if the caller of kfree_rcu() is preemptible, this
3317 * path still is not, as krcp->lock is a raw spinlock.
3318 * With additional page pre-allocation in the works,
3319 * hitting this return is going to be much less likely.
3320 */
3321 if (IS_ENABLED(CONFIG_PREEMPT_RT))
3322 return false;
3323
3324 /*
3325 * NOTE: For the one-argument form of kvfree_rcu() we could
3326 * drop the lock and allocate the page in a sleepable
3327 * context. That would allow the array to be maintained
3328 * on CONFIG_PREEMPT_RT as well when no cached
3329 * pages are available.
3330 */
3331 bnode = (struct kvfree_rcu_bulk_data *)
3332 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
3333 }
3334
3335 /* Switch to emergency path. */
3336 if (unlikely(!bnode))
3337 return false;
3338
3339 /* Initialize the new block. */
3340 bnode->nr_records = 0;
3341 bnode->next = krcp->bkvhead[idx];
3342
3343 /* Attach it to the head. */
3344 krcp->bkvhead[idx] = bnode;
3345 }
3346
3347 /* Finally insert. */
3348 krcp->bkvhead[idx]->records
3349 [krcp->bkvhead[idx]->nr_records++] = ptr;
3350
3351 return true;
3352 }
3353
3354 /*
3355 * Queue a request for lazy invocation of the appropriate free routine after
3356 * a grace period. Note that three paths are maintained: two main ones that
3357 * use the array-of-pointers interface, and a third, emergency one that is
3358 * used only when the main paths cannot be maintained temporarily, due to
3359 * memory pressure.
3360 *
3361 * Each kvfree_call_rcu() request is added to a batch. The batch is drained
3362 * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3363 * in workqueue context. Batching requests together reduces the number of
3364 * grace periods during heavy kfree_rcu()/kvfree_rcu() load.
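 *
 * A rough usage sketch (struct foo, its rhead field, and foo_release() are
 * hypothetical and not part of this file):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rhead;
 *	};
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		// Two-argument form: the rcu_head must be embedded in *fp.
 *		kvfree_rcu(fp, rhead);
 *	}
 *
 * The single-argument (head-less) form, kvfree_rcu(fp), may instead be used
 * from contexts that can sleep, and corresponds to the head == NULL case
 * handled below.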
3365 */ 3366 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) 3367 { 3368 unsigned long flags; 3369 struct kfree_rcu_cpu *krcp; 3370 bool success; 3371 void *ptr; 3372 3373 if (head) { 3374 ptr = (void *) head - (unsigned long) func; 3375 } else { 3376 /* 3377 * Please note there is a limitation for the head-less 3378 * variant, that is why there is a clear rule for such 3379 * objects: it can be used from might_sleep() context 3380 * only. For other places please embed an rcu_head to 3381 * your data. 3382 */ 3383 might_sleep(); 3384 ptr = (unsigned long *) func; 3385 } 3386 3387 krcp = krc_this_cpu_lock(&flags); 3388 3389 // Queue the object but don't yet schedule the batch. 3390 if (debug_rcu_head_queue(ptr)) { 3391 // Probable double kfree_rcu(), just leak. 3392 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n", 3393 __func__, head); 3394 3395 // Mark as success and leave. 3396 success = true; 3397 goto unlock_return; 3398 } 3399 3400 /* 3401 * Under high memory pressure GFP_NOWAIT can fail, 3402 * in that case the emergency path is maintained. 3403 */ 3404 success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr); 3405 if (!success) { 3406 if (head == NULL) 3407 // Inline if kvfree_rcu(one_arg) call. 3408 goto unlock_return; 3409 3410 head->func = func; 3411 head->next = krcp->head; 3412 krcp->head = head; 3413 success = true; 3414 } 3415 3416 WRITE_ONCE(krcp->count, krcp->count + 1); 3417 3418 // Set timer to drain after KFREE_DRAIN_JIFFIES. 3419 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING && 3420 !krcp->monitor_todo) { 3421 krcp->monitor_todo = true; 3422 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES); 3423 } 3424 3425 unlock_return: 3426 krc_this_cpu_unlock(krcp, flags); 3427 3428 /* 3429 * Inline kvfree() after synchronize_rcu(). We can do 3430 * it from might_sleep() context only, so the current 3431 * CPU can pass the QS state. 3432 */ 3433 if (!success) { 3434 debug_rcu_head_unqueue((struct rcu_head *) ptr); 3435 synchronize_rcu(); 3436 kvfree(ptr); 3437 } 3438 } 3439 EXPORT_SYMBOL_GPL(kvfree_call_rcu); 3440 3441 static unsigned long 3442 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 3443 { 3444 int cpu; 3445 unsigned long count = 0; 3446 3447 /* Snapshot count of all CPUs */ 3448 for_each_online_cpu(cpu) { 3449 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3450 3451 count += READ_ONCE(krcp->count); 3452 } 3453 3454 return count; 3455 } 3456 3457 static unsigned long 3458 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 3459 { 3460 int cpu, freed = 0; 3461 unsigned long flags; 3462 3463 for_each_online_cpu(cpu) { 3464 int count; 3465 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3466 3467 count = krcp->count; 3468 raw_spin_lock_irqsave(&krcp->lock, flags); 3469 if (krcp->monitor_todo) 3470 kfree_rcu_drain_unlock(krcp, flags); 3471 else 3472 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3473 3474 sc->nr_to_scan -= count; 3475 freed += count; 3476 3477 if (sc->nr_to_scan <= 0) 3478 break; 3479 } 3480 3481 return freed == 0 ? 
SHRINK_STOP : freed;
3482 }
3483
3484 static struct shrinker kfree_rcu_shrinker = {
3485 .count_objects = kfree_rcu_shrink_count,
3486 .scan_objects = kfree_rcu_shrink_scan,
3487 .batch = 0,
3488 .seeks = DEFAULT_SEEKS,
3489 };
3490
3491 void __init kfree_rcu_scheduler_running(void)
3492 {
3493 int cpu;
3494 unsigned long flags;
3495
3496 for_each_online_cpu(cpu) {
3497 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3498
3499 raw_spin_lock_irqsave(&krcp->lock, flags);
3500 if (!krcp->head || krcp->monitor_todo) {
3501 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3502 continue;
3503 }
3504 krcp->monitor_todo = true;
3505 schedule_delayed_work_on(cpu, &krcp->monitor_work,
3506 KFREE_DRAIN_JIFFIES);
3507 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3508 }
3509 }
3510
3511 /*
3512 * During early boot, any blocking grace-period wait automatically
3513 * implies a grace period. Later on, this is never the case for PREEMPTION.
3514 *
3515 * However, because a context switch is a grace period for !PREEMPTION, any
3516 * blocking grace-period wait automatically implies a grace period if
3517 * there is only one CPU online at any point in time during the execution of
3518 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
3519 * occasionally incorrectly indicate that there are multiple CPUs online
3520 * when there was in fact only one the whole time, as this just adds some
3521 * overhead: RCU still operates correctly.
3522 */
3523 static int rcu_blocking_is_gp(void)
3524 {
3525 int ret;
3526
3527 if (IS_ENABLED(CONFIG_PREEMPTION))
3528 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3529 might_sleep(); /* Check for RCU read-side critical section. */
3530 preempt_disable();
3531 ret = num_online_cpus() <= 1;
3532 preempt_enable();
3533 return ret;
3534 }
3535
3536 /**
3537 * synchronize_rcu - wait until a grace period has elapsed.
3538 *
3539 * Control will return to the caller some time after a full grace
3540 * period has elapsed, in other words after all currently executing RCU
3541 * read-side critical sections have completed. Note, however, that
3542 * upon return from synchronize_rcu(), the caller might well be executing
3543 * concurrently with new RCU read-side critical sections that began while
3544 * synchronize_rcu() was waiting. RCU read-side critical sections are
3545 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
3546 * In addition, regions of code across which interrupts, preemption, or
3547 * softirqs have been disabled also serve as RCU read-side critical
3548 * sections. This includes hardware interrupt handlers, softirq handlers,
3549 * and NMI handlers.
3550 *
3551 * Note that this guarantee implies further memory-ordering guarantees.
3552 * On systems with more than one CPU, when synchronize_rcu() returns,
3553 * each CPU is guaranteed to have executed a full memory barrier since
3554 * the end of its last RCU read-side critical section whose beginning
3555 * preceded the call to synchronize_rcu(). In addition, each CPU having
3556 * an RCU read-side critical section that extends beyond the return from
3557 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3558 * after the beginning of synchronize_rcu() and before the beginning of
3559 * that RCU read-side critical section. Note that these guarantees include
3560 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3561 * that are executing in the kernel.
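 *
 * As a rough usage sketch (the RCU-protected global pointer gp, the lock
 * foo_lock, and the freed structure are hypothetical and not part of this
 * file), an updater might do:
 *
 *	spin_lock(&foo_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();	// Wait for pre-existing readers to finish.
 *	kfree(p);		// No reader can still hold a reference to *p.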
3562 * 3563 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 3564 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 3565 * to have executed a full memory barrier during the execution of 3566 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 3567 * again only if the system has more than one CPU). 3568 */ 3569 void synchronize_rcu(void) 3570 { 3571 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 3572 lock_is_held(&rcu_lock_map) || 3573 lock_is_held(&rcu_sched_lock_map), 3574 "Illegal synchronize_rcu() in RCU read-side critical section"); 3575 if (rcu_blocking_is_gp()) 3576 return; 3577 if (rcu_gp_is_expedited()) 3578 synchronize_rcu_expedited(); 3579 else 3580 wait_rcu_gp(call_rcu); 3581 } 3582 EXPORT_SYMBOL_GPL(synchronize_rcu); 3583 3584 /** 3585 * get_state_synchronize_rcu - Snapshot current RCU state 3586 * 3587 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 3588 * to determine whether or not a full grace period has elapsed in the 3589 * meantime. 3590 */ 3591 unsigned long get_state_synchronize_rcu(void) 3592 { 3593 /* 3594 * Any prior manipulation of RCU-protected data must happen 3595 * before the load from ->gp_seq. 3596 */ 3597 smp_mb(); /* ^^^ */ 3598 return rcu_seq_snap(&rcu_state.gp_seq); 3599 } 3600 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3601 3602 /** 3603 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 3604 * 3605 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 3606 * 3607 * If a full RCU grace period has elapsed since the earlier call to 3608 * get_state_synchronize_rcu(), just return. Otherwise, invoke 3609 * synchronize_rcu() to wait for a full grace period. 3610 * 3611 * Yes, this function does not take counter wrap into account. But 3612 * counter wrap is harmless. If the counter wraps, we have waited for 3613 * more than 2 billion grace periods (and way more on a 64-bit system!), 3614 * so waiting for one additional grace period should be just fine. 3615 */ 3616 void cond_synchronize_rcu(unsigned long oldstate) 3617 { 3618 if (!rcu_seq_done(&rcu_state.gp_seq, oldstate)) 3619 synchronize_rcu(); 3620 else 3621 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 3622 } 3623 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3624 3625 /* 3626 * Check to see if there is any immediate RCU-related work to be done by 3627 * the current CPU, returning 1 if so and zero otherwise. The checks are 3628 * in order of increasing expense: checks that can be carried out against 3629 * CPU-local state are performed first. However, we must check for CPU 3630 * stalls first, else we might not get a chance. 3631 */ 3632 static int rcu_pending(int user) 3633 { 3634 bool gp_in_progress; 3635 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 3636 struct rcu_node *rnp = rdp->mynode; 3637 3638 /* Check for CPU stalls, if enabled. */ 3639 check_cpu_stall(rdp); 3640 3641 /* Does this CPU need a deferred NOCB wakeup? */ 3642 if (rcu_nocb_need_deferred_wakeup(rdp)) 3643 return 1; 3644 3645 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 3646 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu()) 3647 return 0; 3648 3649 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3650 gp_in_progress = rcu_gp_in_progress(); 3651 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 3652 return 1; 3653 3654 /* Does this CPU have callbacks ready to invoke? 
*/ 3655 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 3656 return 1; 3657 3658 /* Has RCU gone idle with this CPU needing another grace period? */ 3659 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 3660 (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) || 3661 !rcu_segcblist_is_offloaded(&rdp->cblist)) && 3662 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3663 return 1; 3664 3665 /* Have RCU grace period completed or started? */ 3666 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 3667 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3668 return 1; 3669 3670 /* nothing to do */ 3671 return 0; 3672 } 3673 3674 /* 3675 * Helper function for rcu_barrier() tracing. If tracing is disabled, 3676 * the compiler is expected to optimize this away. 3677 */ 3678 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 3679 { 3680 trace_rcu_barrier(rcu_state.name, s, cpu, 3681 atomic_read(&rcu_state.barrier_cpu_count), done); 3682 } 3683 3684 /* 3685 * RCU callback function for rcu_barrier(). If we are last, wake 3686 * up the task executing rcu_barrier(). 3687 * 3688 * Note that the value of rcu_state.barrier_sequence must be captured 3689 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 3690 * other CPUs might count the value down to zero before this CPU gets 3691 * around to invoking rcu_barrier_trace(), which might result in bogus 3692 * data from the next instance of rcu_barrier(). 3693 */ 3694 static void rcu_barrier_callback(struct rcu_head *rhp) 3695 { 3696 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 3697 3698 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 3699 rcu_barrier_trace(TPS("LastCB"), -1, s); 3700 complete(&rcu_state.barrier_completion); 3701 } else { 3702 rcu_barrier_trace(TPS("CB"), -1, s); 3703 } 3704 } 3705 3706 /* 3707 * Called with preemption disabled, and from cross-cpu IRQ context. 3708 */ 3709 static void rcu_barrier_func(void *cpu_in) 3710 { 3711 uintptr_t cpu = (uintptr_t)cpu_in; 3712 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3713 3714 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 3715 rdp->barrier_head.func = rcu_barrier_callback; 3716 debug_rcu_head_queue(&rdp->barrier_head); 3717 rcu_nocb_lock(rdp); 3718 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); 3719 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 3720 atomic_inc(&rcu_state.barrier_cpu_count); 3721 } else { 3722 debug_rcu_head_unqueue(&rdp->barrier_head); 3723 rcu_barrier_trace(TPS("IRQNQ"), -1, 3724 rcu_state.barrier_sequence); 3725 } 3726 rcu_nocb_unlock(rdp); 3727 } 3728 3729 /** 3730 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 3731 * 3732 * Note that this primitive does not necessarily wait for an RCU grace period 3733 * to complete. For example, if there are no RCU callbacks queued anywhere 3734 * in the system, then rcu_barrier() is within its rights to return 3735 * immediately, without waiting for anything, much less an RCU grace period. 3736 */ 3737 void rcu_barrier(void) 3738 { 3739 uintptr_t cpu; 3740 struct rcu_data *rdp; 3741 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 3742 3743 rcu_barrier_trace(TPS("Begin"), -1, s); 3744 3745 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3746 mutex_lock(&rcu_state.barrier_mutex); 3747 3748 /* Did someone else do our work for us? 
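   Specifically, if the barrier sequence has completed a full update since
   we snapshotted it, then an entire rcu_barrier() ran after we entered, so
   all callbacks queued before our call have already been invoked and we
   can simply return.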
*/
3749 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3750 rcu_barrier_trace(TPS("EarlyExit"), -1,
3751 rcu_state.barrier_sequence);
3752 smp_mb(); /* caller's subsequent code after above check. */
3753 mutex_unlock(&rcu_state.barrier_mutex);
3754 return;
3755 }
3756
3757 /* Mark the start of the barrier operation. */
3758 rcu_seq_start(&rcu_state.barrier_sequence);
3759 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3760
3761 /*
3762 * Initialize the count to two rather than to zero in order
3763 * to avoid a too-soon return to zero in case of an immediate
3764 * invocation of the just-enqueued callback (or preemption of
3765 * this task). Exclude CPU-hotplug operations to ensure that no
3766 * offline non-offloaded CPU has callbacks queued.
3767 */
3768 init_completion(&rcu_state.barrier_completion);
3769 atomic_set(&rcu_state.barrier_cpu_count, 2);
3770 get_online_cpus();
3771
3772 /*
3773 * Force each CPU with callbacks to register a new callback.
3774 * When that callback is invoked, we will know that all of the
3775 * corresponding CPU's preceding callbacks have been invoked.
3776 */
3777 for_each_possible_cpu(cpu) {
3778 rdp = per_cpu_ptr(&rcu_data, cpu);
3779 if (cpu_is_offline(cpu) &&
3780 !rcu_segcblist_is_offloaded(&rdp->cblist))
3781 continue;
3782 if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
3783 rcu_barrier_trace(TPS("OnlineQ"), cpu,
3784 rcu_state.barrier_sequence);
3785 smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
3786 } else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
3787 cpu_is_offline(cpu)) {
3788 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
3789 rcu_state.barrier_sequence);
3790 local_irq_disable();
3791 rcu_barrier_func((void *)cpu);
3792 local_irq_enable();
3793 } else if (cpu_is_offline(cpu)) {
3794 rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
3795 rcu_state.barrier_sequence);
3796 } else {
3797 rcu_barrier_trace(TPS("OnlineNQ"), cpu,
3798 rcu_state.barrier_sequence);
3799 }
3800 }
3801 put_online_cpus();
3802
3803 /*
3804 * Now that we have an rcu_barrier_callback() callback on each
3805 * CPU, and thus each counted, remove the initial count.
3806 */
3807 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3808 complete(&rcu_state.barrier_completion);
3809
3810 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3811 wait_for_completion(&rcu_state.barrier_completion);
3812
3813 /* Mark the end of the barrier operation. */
3814 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3815 rcu_seq_end(&rcu_state.barrier_sequence);
3816
3817 /* Other rcu_barrier() invocations can now safely proceed. */
3818 mutex_unlock(&rcu_state.barrier_mutex);
3819 }
3820 EXPORT_SYMBOL_GPL(rcu_barrier);
3821
3822 /*
3823 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
3824 * first CPU in a given leaf rcu_node structure coming online. The caller
3825 * must hold the corresponding leaf rcu_node ->lock with interrupts
3826 * disabled.
3827 */
3828 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3829 {
3830 long mask;
3831 long oldmask;
3832 struct rcu_node *rnp = rnp_leaf;
3833
3834 raw_lockdep_assert_held_rcu_node(rnp_leaf);
3835 WARN_ON_ONCE(rnp->wait_blkd_tasks);
3836 for (;;) {
3837 mask = rnp->grpmask;
3838 rnp = rnp->parent;
3839 if (rnp == NULL)
3840 return;
3841 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3842 oldmask = rnp->qsmaskinit;
3843 rnp->qsmaskinit |= mask;
3844 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled.
*/ 3845 if (oldmask) 3846 return; 3847 } 3848 } 3849 3850 /* 3851 * Do boot-time initialization of a CPU's per-CPU RCU data. 3852 */ 3853 static void __init 3854 rcu_boot_init_percpu_data(int cpu) 3855 { 3856 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3857 3858 /* Set up local state, ensuring consistent view of global state. */ 3859 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); 3860 WARN_ON_ONCE(rdp->dynticks_nesting != 1); 3861 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); 3862 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; 3863 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; 3864 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; 3865 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; 3866 rdp->cpu = cpu; 3867 rcu_boot_init_nocb_percpu_data(rdp); 3868 } 3869 3870 /* 3871 * Invoked early in the CPU-online process, when pretty much all services 3872 * are available. The incoming CPU is not present. 3873 * 3874 * Initializes a CPU's per-CPU RCU data. Note that only one online or 3875 * offline event can be happening at a given time. Note also that we can 3876 * accept some slop in the rsp->gp_seq access due to the fact that this 3877 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 3878 * And any offloaded callbacks are being numbered elsewhere. 3879 */ 3880 int rcutree_prepare_cpu(unsigned int cpu) 3881 { 3882 unsigned long flags; 3883 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3884 struct rcu_node *rnp = rcu_get_root(); 3885 3886 /* Set up local state, ensuring consistent view of global state. */ 3887 raw_spin_lock_irqsave_rcu_node(rnp, flags); 3888 rdp->qlen_last_fqs_check = 0; 3889 rdp->n_force_qs_snap = rcu_state.n_force_qs; 3890 rdp->blimit = blimit; 3891 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ 3892 !rcu_segcblist_is_offloaded(&rdp->cblist)) 3893 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 3894 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ 3895 rcu_dynticks_eqs_online(); 3896 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 3897 3898 /* 3899 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 3900 * propagation up the rcu_node tree will happen at the beginning 3901 * of the next grace period. 3902 */ 3903 rnp = rdp->mynode; 3904 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 3905 rdp->beenonline = true; /* We have now been online. */ 3906 rdp->gp_seq = READ_ONCE(rnp->gp_seq); 3907 rdp->gp_seq_needed = rdp->gp_seq; 3908 rdp->cpu_no_qs.b.norm = true; 3909 rdp->core_needs_qs = false; 3910 rdp->rcu_iw_pending = false; 3911 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 3912 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 3913 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3914 rcu_prepare_kthreads(cpu); 3915 rcu_spawn_cpu_nocb_kthread(cpu); 3916 3917 return 0; 3918 } 3919 3920 /* 3921 * Update RCU priority boot kthread affinity for CPU-hotplug changes. 3922 */ 3923 static void rcutree_affinity_setting(unsigned int cpu, int outgoing) 3924 { 3925 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 3926 3927 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); 3928 } 3929 3930 /* 3931 * Near the end of the CPU-online process. Pretty much all services 3932 * enabled, and the CPU is now very much alive. 
3933 */
3934 int rcutree_online_cpu(unsigned int cpu)
3935 {
3936 unsigned long flags;
3937 struct rcu_data *rdp;
3938 struct rcu_node *rnp;
3939
3940 rdp = per_cpu_ptr(&rcu_data, cpu);
3941 rnp = rdp->mynode;
3942 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3943 rnp->ffmask |= rdp->grpmask;
3944 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3945 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
3946 return 0; /* Too early in boot for scheduler work. */
3947 sync_sched_exp_online_cleanup(cpu);
3948 rcutree_affinity_setting(cpu, -1);
3949
3950 // Stop-machine done, so allow nohz_full to disable tick.
3951 tick_dep_clear(TICK_DEP_BIT_RCU);
3952 return 0;
3953 }
3954
3955 /*
3956 * Near the beginning of the CPU-offline process. The CPU is still very
3957 * much alive with pretty much all services enabled.
3958 */
3959 int rcutree_offline_cpu(unsigned int cpu)
3960 {
3961 unsigned long flags;
3962 struct rcu_data *rdp;
3963 struct rcu_node *rnp;
3964
3965 rdp = per_cpu_ptr(&rcu_data, cpu);
3966 rnp = rdp->mynode;
3967 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3968 rnp->ffmask &= ~rdp->grpmask;
3969 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3970
3971 rcutree_affinity_setting(cpu, cpu);
3972
3973 // nohz_full CPUs need the tick for stop-machine to work quickly
3974 tick_dep_set(TICK_DEP_BIT_RCU);
3975 return 0;
3976 }
3977
3978 static DEFINE_PER_CPU(int, rcu_cpu_started);
3979
3980 /*
3981 * Mark the specified CPU as being online so that subsequent grace periods
3982 * (both expedited and normal) will wait on it. Note that this means that
3983 * incoming CPUs are not allowed to use RCU read-side critical sections
3984 * until this function is called. Failing to observe this restriction
3985 * will result in lockdep splats.
3986 *
3987 * Note that this function is special in that it is invoked directly
3988 * from the incoming CPU rather than from the cpuhp_step mechanism.
3989 * This is because this function must be invoked at a precise location.
3990 */
3991 void rcu_cpu_starting(unsigned int cpu)
3992 {
3993 unsigned long flags;
3994 unsigned long mask;
3995 struct rcu_data *rdp;
3996 struct rcu_node *rnp;
3997 bool newcpu;
3998
3999 if (per_cpu(rcu_cpu_started, cpu))
4000 return;
4001
4002 per_cpu(rcu_cpu_started, cpu) = 1;
4003
4004 rdp = per_cpu_ptr(&rcu_data, cpu);
4005 rnp = rdp->mynode;
4006 mask = rdp->grpmask;
4007 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4008 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4009 newcpu = !(rnp->expmaskinitnext & mask);
4010 rnp->expmaskinitnext |= mask;
4011 /* Allow lockless access for expedited grace periods. */
4012 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4013 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4014 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4015 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4016 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4017 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
4018 rcu_disable_urgency_upon_qs(rdp);
4019 /* Report QS -after- changing ->qsmaskinitnext! */
4020 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4021 } else {
4022 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4023 }
4024 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4025 }
4026
4027 #ifdef CONFIG_HOTPLUG_CPU
4028 /*
4029 * The outgoing CPU has no further need of RCU, so remove it from
4030 * the rcu_node tree's ->qsmaskinitnext bit masks.
4031 * 4032 * Note that this function is special in that it is invoked directly 4033 * from the outgoing CPU rather than from the cpuhp_step mechanism. 4034 * This is because this function must be invoked at a precise location. 4035 */ 4036 void rcu_report_dead(unsigned int cpu) 4037 { 4038 unsigned long flags; 4039 unsigned long mask; 4040 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4041 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 4042 4043 /* QS for any half-done expedited grace period. */ 4044 preempt_disable(); 4045 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); 4046 preempt_enable(); 4047 rcu_preempt_deferred_qs(current); 4048 4049 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 4050 mask = rdp->grpmask; 4051 raw_spin_lock(&rcu_state.ofl_lock); 4052 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 4053 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); 4054 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); 4055 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ 4056 /* Report quiescent state -before- changing ->qsmaskinitnext! */ 4057 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 4058 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4059 } 4060 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); 4061 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4062 raw_spin_unlock(&rcu_state.ofl_lock); 4063 4064 per_cpu(rcu_cpu_started, cpu) = 0; 4065 } 4066 4067 /* 4068 * The outgoing CPU has just passed through the dying-idle state, and we 4069 * are being invoked from the CPU that was IPIed to continue the offline 4070 * operation. Migrate the outgoing CPU's callbacks to the current CPU. 4071 */ 4072 void rcutree_migrate_callbacks(int cpu) 4073 { 4074 unsigned long flags; 4075 struct rcu_data *my_rdp; 4076 struct rcu_node *my_rnp; 4077 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4078 bool needwake; 4079 4080 if (rcu_segcblist_is_offloaded(&rdp->cblist) || 4081 rcu_segcblist_empty(&rdp->cblist)) 4082 return; /* No callbacks to migrate. */ 4083 4084 local_irq_save(flags); 4085 my_rdp = this_cpu_ptr(&rcu_data); 4086 my_rnp = my_rdp->mynode; 4087 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 4088 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies)); 4089 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 4090 /* Leverage recent GPs and set GP for new callbacks. */ 4091 needwake = rcu_advance_cbs(my_rnp, rdp) || 4092 rcu_advance_cbs(my_rnp, my_rdp); 4093 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 4094 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 4095 rcu_segcblist_disable(&rdp->cblist); 4096 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != 4097 !rcu_segcblist_n_cbs(&my_rdp->cblist)); 4098 if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) { 4099 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 4100 __call_rcu_nocb_wake(my_rdp, true, flags); 4101 } else { 4102 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. 
*/ 4103 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags); 4104 } 4105 if (needwake) 4106 rcu_gp_kthread_wake(); 4107 lockdep_assert_irqs_enabled(); 4108 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 4109 !rcu_segcblist_empty(&rdp->cblist), 4110 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 4111 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 4112 rcu_segcblist_first_cb(&rdp->cblist)); 4113 } 4114 #endif 4115 4116 /* 4117 * On non-huge systems, use expedited RCU grace periods to make suspend 4118 * and hibernation run faster. 4119 */ 4120 static int rcu_pm_notify(struct notifier_block *self, 4121 unsigned long action, void *hcpu) 4122 { 4123 switch (action) { 4124 case PM_HIBERNATION_PREPARE: 4125 case PM_SUSPEND_PREPARE: 4126 rcu_expedite_gp(); 4127 break; 4128 case PM_POST_HIBERNATION: 4129 case PM_POST_SUSPEND: 4130 rcu_unexpedite_gp(); 4131 break; 4132 default: 4133 break; 4134 } 4135 return NOTIFY_OK; 4136 } 4137 4138 /* 4139 * Spawn the kthreads that handle RCU's grace periods. 4140 */ 4141 static int __init rcu_spawn_gp_kthread(void) 4142 { 4143 unsigned long flags; 4144 int kthread_prio_in = kthread_prio; 4145 struct rcu_node *rnp; 4146 struct sched_param sp; 4147 struct task_struct *t; 4148 4149 /* Force priority into range. */ 4150 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2 4151 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) 4152 kthread_prio = 2; 4153 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 4154 kthread_prio = 1; 4155 else if (kthread_prio < 0) 4156 kthread_prio = 0; 4157 else if (kthread_prio > 99) 4158 kthread_prio = 99; 4159 4160 if (kthread_prio != kthread_prio_in) 4161 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", 4162 kthread_prio, kthread_prio_in); 4163 4164 rcu_scheduler_fully_active = 1; 4165 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 4166 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 4167 return 0; 4168 if (kthread_prio) { 4169 sp.sched_priority = kthread_prio; 4170 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 4171 } 4172 rnp = rcu_get_root(); 4173 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4174 WRITE_ONCE(rcu_state.gp_activity, jiffies); 4175 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 4176 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread. 4177 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ 4178 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4179 wake_up_process(t); 4180 rcu_spawn_nocb_kthreads(); 4181 rcu_spawn_boost_kthreads(); 4182 return 0; 4183 } 4184 early_initcall(rcu_spawn_gp_kthread); 4185 4186 /* 4187 * This function is invoked towards the end of the scheduler's 4188 * initialization process. Before this is called, the idle task might 4189 * contain synchronous grace-period primitives (during which time, this idle 4190 * task is booting the system, and such primitives are no-ops). After this 4191 * function is called, any synchronous grace-period primitives are run as 4192 * expedited, with the requesting task driving the grace period forward. 4193 * A later core_initcall() rcu_set_runtime_mode() will switch to full 4194 * runtime RCU functionality. 
4195 */ 4196 void rcu_scheduler_starting(void) 4197 { 4198 WARN_ON(num_online_cpus() != 1); 4199 WARN_ON(nr_context_switches() > 0); 4200 rcu_test_sync_prims(); 4201 rcu_scheduler_active = RCU_SCHEDULER_INIT; 4202 rcu_test_sync_prims(); 4203 } 4204 4205 /* 4206 * Helper function for rcu_init() that initializes the rcu_state structure. 4207 */ 4208 static void __init rcu_init_one(void) 4209 { 4210 static const char * const buf[] = RCU_NODE_NAME_INIT; 4211 static const char * const fqs[] = RCU_FQS_NAME_INIT; 4212 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 4213 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 4214 4215 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 4216 int cpustride = 1; 4217 int i; 4218 int j; 4219 struct rcu_node *rnp; 4220 4221 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 4222 4223 /* Silence gcc 4.8 false positive about array index out of range. */ 4224 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 4225 panic("rcu_init_one: rcu_num_lvls out of range"); 4226 4227 /* Initialize the level-tracking arrays. */ 4228 4229 for (i = 1; i < rcu_num_lvls; i++) 4230 rcu_state.level[i] = 4231 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; 4232 rcu_init_levelspread(levelspread, num_rcu_lvl); 4233 4234 /* Initialize the elements themselves, starting from the leaves. */ 4235 4236 for (i = rcu_num_lvls - 1; i >= 0; i--) { 4237 cpustride *= levelspread[i]; 4238 rnp = rcu_state.level[i]; 4239 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { 4240 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); 4241 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), 4242 &rcu_node_class[i], buf[i]); 4243 raw_spin_lock_init(&rnp->fqslock); 4244 lockdep_set_class_and_name(&rnp->fqslock, 4245 &rcu_fqs_class[i], fqs[i]); 4246 rnp->gp_seq = rcu_state.gp_seq; 4247 rnp->gp_seq_needed = rcu_state.gp_seq; 4248 rnp->completedqs = rcu_state.gp_seq; 4249 rnp->qsmask = 0; 4250 rnp->qsmaskinit = 0; 4251 rnp->grplo = j * cpustride; 4252 rnp->grphi = (j + 1) * cpustride - 1; 4253 if (rnp->grphi >= nr_cpu_ids) 4254 rnp->grphi = nr_cpu_ids - 1; 4255 if (i == 0) { 4256 rnp->grpnum = 0; 4257 rnp->grpmask = 0; 4258 rnp->parent = NULL; 4259 } else { 4260 rnp->grpnum = j % levelspread[i - 1]; 4261 rnp->grpmask = BIT(rnp->grpnum); 4262 rnp->parent = rcu_state.level[i - 1] + 4263 j / levelspread[i - 1]; 4264 } 4265 rnp->level = i; 4266 INIT_LIST_HEAD(&rnp->blkd_tasks); 4267 rcu_init_one_nocb(rnp); 4268 init_waitqueue_head(&rnp->exp_wq[0]); 4269 init_waitqueue_head(&rnp->exp_wq[1]); 4270 init_waitqueue_head(&rnp->exp_wq[2]); 4271 init_waitqueue_head(&rnp->exp_wq[3]); 4272 spin_lock_init(&rnp->exp_lock); 4273 } 4274 } 4275 4276 init_swait_queue_head(&rcu_state.gp_wq); 4277 init_swait_queue_head(&rcu_state.expedited_wq); 4278 rnp = rcu_first_leaf_node(); 4279 for_each_possible_cpu(i) { 4280 while (i > rnp->grphi) 4281 rnp++; 4282 per_cpu_ptr(&rcu_data, i)->mynode = rnp; 4283 rcu_boot_init_percpu_data(i); 4284 } 4285 } 4286 4287 /* 4288 * Compute the rcu_node tree geometry from kernel parameters. This cannot 4289 * replace the definitions in tree.h because those are needed to size 4290 * the ->node array in the rcu_state structure. 4291 */ 4292 static void __init rcu_init_geometry(void) 4293 { 4294 ulong d; 4295 int i; 4296 int rcu_capacity[RCU_NUM_LVLS]; 4297 4298 /* 4299 * Initialize any unspecified boot parameters. 
4300 * The default values of jiffies_till_first_fqs and
4301 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4302 * value (which is a function of HZ), plus one for each
4303 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4304 */
4305 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4306 if (jiffies_till_first_fqs == ULONG_MAX)
4307 jiffies_till_first_fqs = d;
4308 if (jiffies_till_next_fqs == ULONG_MAX)
4309 jiffies_till_next_fqs = d;
4310 adjust_jiffies_till_sched_qs();
4311
4312 /* If the compile-time values are accurate, just leave. */
4313 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4314 nr_cpu_ids == NR_CPUS)
4315 return;
4316 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4317 rcu_fanout_leaf, nr_cpu_ids);
4318
4319 /*
4320 * The boot-time rcu_fanout_leaf parameter must be at least two
4321 * and cannot exceed the number of bits in the rcu_node masks.
4322 * Complain and fall back to the compile-time values if this
4323 * limit is exceeded.
4324 */
4325 if (rcu_fanout_leaf < 2 ||
4326 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4327 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4328 WARN_ON(1);
4329 return;
4330 }
4331
4332 /*
4333 * Compute the number of CPUs that can be handled by an rcu_node
4334 * tree with the given number of levels.
4335 */
4336 rcu_capacity[0] = rcu_fanout_leaf;
4337 for (i = 1; i < RCU_NUM_LVLS; i++)
4338 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4339
4340 /*
4341 * The tree must be able to accommodate the configured number of CPUs.
4342 * If this limit is exceeded, fall back to the compile-time values.
4343 */
4344 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4345 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4346 WARN_ON(1);
4347 return;
4348 }
4349
4350 /* Calculate the number of levels in the tree. */
4351 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4352 }
4353 rcu_num_lvls = i + 1;
4354
4355 /* Calculate the number of rcu_nodes at each level of the tree. */
4356 for (i = 0; i < rcu_num_lvls; i++) {
4357 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4358 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4359 }
4360
4361 /* Calculate the total number of rcu_node structures. */
4362 rcu_num_nodes = 0;
4363 for (i = 0; i < rcu_num_lvls; i++)
4364 rcu_num_nodes += num_rcu_lvl[i];
4365 }
4366
4367 /*
4368 * Dump out the structure of the rcu_node combining tree associated
4369 * with the rcu_state structure.
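 *
 * As a hypothetical illustration (not taken from a real boot log), a
 * two-level tree covering four CPUs with rcu_fanout_leaf=2 would be
 * dumped roughly as:
 *
 *	rcu_node tree layout dump
 *	 0:3 ^0
 *	 0:1 ^0 2:3 ^1
 *
 * where each entry is <grplo>:<grphi> ^<grpnum>.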
4370 */ 4371 static void __init rcu_dump_rcu_node_tree(void) 4372 { 4373 int level = 0; 4374 struct rcu_node *rnp; 4375 4376 pr_info("rcu_node tree layout dump\n"); 4377 pr_info(" "); 4378 rcu_for_each_node_breadth_first(rnp) { 4379 if (rnp->level != level) { 4380 pr_cont("\n"); 4381 pr_info(" "); 4382 level = rnp->level; 4383 } 4384 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 4385 } 4386 pr_cont("\n"); 4387 } 4388 4389 struct workqueue_struct *rcu_gp_wq; 4390 struct workqueue_struct *rcu_par_gp_wq; 4391 4392 static void __init kfree_rcu_batch_init(void) 4393 { 4394 int cpu; 4395 int i; 4396 4397 for_each_possible_cpu(cpu) { 4398 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 4399 struct kvfree_rcu_bulk_data *bnode; 4400 4401 for (i = 0; i < KFREE_N_BATCHES; i++) { 4402 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); 4403 krcp->krw_arr[i].krcp = krcp; 4404 } 4405 4406 for (i = 0; i < rcu_min_cached_objs; i++) { 4407 bnode = (struct kvfree_rcu_bulk_data *) 4408 __get_free_page(GFP_NOWAIT | __GFP_NOWARN); 4409 4410 if (bnode) 4411 put_cached_bnode(krcp, bnode); 4412 else 4413 pr_err("Failed to preallocate for %d CPU!\n", cpu); 4414 } 4415 4416 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); 4417 krcp->initialized = true; 4418 } 4419 if (register_shrinker(&kfree_rcu_shrinker)) 4420 pr_err("Failed to register kfree_rcu() shrinker!\n"); 4421 } 4422 4423 void __init rcu_init(void) 4424 { 4425 int cpu; 4426 4427 rcu_early_boot_tests(); 4428 4429 kfree_rcu_batch_init(); 4430 rcu_bootup_announce(); 4431 rcu_init_geometry(); 4432 rcu_init_one(); 4433 if (dump_tree) 4434 rcu_dump_rcu_node_tree(); 4435 if (use_softirq) 4436 open_softirq(RCU_SOFTIRQ, rcu_core_si); 4437 4438 /* 4439 * We don't need protection against CPU-hotplug here because 4440 * this is called early in boot, before either interrupts 4441 * or the scheduler are operational. 4442 */ 4443 pm_notifier(rcu_pm_notify, 0); 4444 for_each_online_cpu(cpu) { 4445 rcutree_prepare_cpu(cpu); 4446 rcu_cpu_starting(cpu); 4447 rcutree_online_cpu(cpu); 4448 } 4449 4450 /* Create workqueue for expedited GPs and for Tree SRCU. */ 4451 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); 4452 WARN_ON(!rcu_gp_wq); 4453 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); 4454 WARN_ON(!rcu_par_gp_wq); 4455 srcu_init(); 4456 4457 /* Fill in default value for rcutree.qovld boot parameter. */ 4458 /* -After- the rcu_node ->lock fields are initialized! */ 4459 if (qovld < 0) 4460 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark; 4461 else 4462 qovld_calc = qovld; 4463 } 4464 4465 #include "tree_stall.h" 4466 #include "tree_exp.h" 4467 #include "tree_plugin.h" 4468