1 /* 2 * Read-Copy Update mechanism for mutual exclusion 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright IBM Corporation, 2008 19 * 20 * Authors: Dipankar Sarma <dipankar@in.ibm.com> 21 * Manfred Spraul <manfred@colorfullife.com> 22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version 23 * 24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com> 25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 26 * 27 * For detailed explanation of Read-Copy Update mechanism see - 28 * Documentation/RCU 29 */ 30 #include <linux/types.h> 31 #include <linux/kernel.h> 32 #include <linux/init.h> 33 #include <linux/spinlock.h> 34 #include <linux/smp.h> 35 #include <linux/rcupdate.h> 36 #include <linux/interrupt.h> 37 #include <linux/sched.h> 38 #include <linux/nmi.h> 39 #include <linux/atomic.h> 40 #include <linux/bitops.h> 41 #include <linux/export.h> 42 #include <linux/completion.h> 43 #include <linux/moduleparam.h> 44 #include <linux/module.h> 45 #include <linux/percpu.h> 46 #include <linux/notifier.h> 47 #include <linux/cpu.h> 48 #include <linux/mutex.h> 49 #include <linux/time.h> 50 #include <linux/kernel_stat.h> 51 #include <linux/wait.h> 52 #include <linux/kthread.h> 53 #include <linux/prefetch.h> 54 #include <linux/delay.h> 55 #include <linux/stop_machine.h> 56 #include <linux/random.h> 57 #include <linux/ftrace_event.h> 58 #include <linux/suspend.h> 59 60 #include "tree.h" 61 #include "rcu.h" 62 63 MODULE_ALIAS("rcutree"); 64 #ifdef MODULE_PARAM_PREFIX 65 #undef MODULE_PARAM_PREFIX 66 #endif 67 #define MODULE_PARAM_PREFIX "rcutree." 68 69 /* Data structures. */ 70 71 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 72 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 73 74 /* 75 * In order to export the rcu_state name to the tracing tools, it 76 * needs to be added in the __tracepoint_string section. 77 * This requires defining a separate variable tp_<sname>_varname 78 * that points to the string being used, and this will allow 79 * the tracing userspace tools to be able to decipher the string 80 * address to the matching string. 81 */ 82 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \ 83 static char sname##_varname[] = #sname; \ 84 static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \ 85 struct rcu_state sname##_state = { \ 86 .level = { &sname##_state.node[0] }, \ 87 .call = cr, \ 88 .fqs_state = RCU_GP_IDLE, \ 89 .gpnum = 0UL - 300UL, \ 90 .completed = 0UL - 300UL, \ 91 .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \ 92 .orphan_nxttail = &sname##_state.orphan_nxtlist, \ 93 .orphan_donetail = &sname##_state.orphan_donelist, \ 94 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ 95 .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \ 96 .name = sname##_varname, \ 97 .abbr = sabbr, \ 98 }; \ 99 DEFINE_PER_CPU(struct rcu_data, sname##_data) 100 101 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); 102 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); 103 104 static struct rcu_state *rcu_state_p; 105 LIST_HEAD(rcu_struct_flavors); 106 107 /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ 108 static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; 109 module_param(rcu_fanout_leaf, int, 0444); 110 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; 111 static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */ 112 NUM_RCU_LVL_0, 113 NUM_RCU_LVL_1, 114 NUM_RCU_LVL_2, 115 NUM_RCU_LVL_3, 116 NUM_RCU_LVL_4, 117 }; 118 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ 119 120 /* 121 * The rcu_scheduler_active variable transitions from zero to one just 122 * before the first task is spawned. So when this variable is zero, RCU 123 * can assume that there is but one task, allowing RCU to (for example) 124 * optimize synchronize_sched() to a simple barrier(). When this variable 125 * is one, RCU must actually do all the hard work required to detect real 126 * grace periods. This variable is also used to suppress boot-time false 127 * positives from lockdep-RCU error checking. 128 */ 129 int rcu_scheduler_active __read_mostly; 130 EXPORT_SYMBOL_GPL(rcu_scheduler_active); 131 132 /* 133 * The rcu_scheduler_fully_active variable transitions from zero to one 134 * during the early_initcall() processing, which is after the scheduler 135 * is capable of creating new tasks. So RCU processing (for example, 136 * creating tasks for RCU priority boosting) must be delayed until after 137 * rcu_scheduler_fully_active transitions from zero to one. We also 138 * currently delay invocation of any RCU callbacks until after this point. 139 * 140 * It might later prove better for people registering RCU callbacks during 141 * early boot to take responsibility for these callbacks, but one step at 142 * a time. 143 */ 144 static int rcu_scheduler_fully_active __read_mostly; 145 146 #ifdef CONFIG_RCU_BOOST 147 148 /* 149 * Control variables for per-CPU and per-rcu_node kthreads. These 150 * handle all flavors of RCU. 151 */ 152 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); 153 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); 154 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); 155 DEFINE_PER_CPU(char, rcu_cpu_has_work); 156 157 #endif /* #ifdef CONFIG_RCU_BOOST */ 158 159 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 160 static void invoke_rcu_core(void); 161 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 162 163 /* 164 * Track the rcutorture test sequence number and the update version 165 * number within a given test. The rcutorture_testseq is incremented 166 * on every rcutorture module load and unload, so has an odd value 167 * when a test is running. The rcutorture_vernum is set to zero 168 * when rcutorture starts and is incremented on each rcutorture update. 169 * These variables enable correlating rcutorture output with the 170 * RCU tracing information. 171 */ 172 unsigned long rcutorture_testseq; 173 unsigned long rcutorture_vernum; 174 175 /* 176 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s 177 * permit this function to be invoked without holding the root rcu_node 178 * structure's ->lock, but of course results can be subject to change. 179 */ 180 static int rcu_gp_in_progress(struct rcu_state *rsp) 181 { 182 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum); 183 } 184 185 /* 186 * Note a quiescent state. Because we do not need to know 187 * how many quiescent states passed, just if there was at least 188 * one since the start of the grace period, this just sets a flag. 189 * The caller must have disabled preemption. 190 */ 191 void rcu_sched_qs(int cpu) 192 { 193 struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); 194 195 if (rdp->passed_quiesce == 0) 196 trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs")); 197 rdp->passed_quiesce = 1; 198 } 199 200 void rcu_bh_qs(int cpu) 201 { 202 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); 203 204 if (rdp->passed_quiesce == 0) 205 trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs")); 206 rdp->passed_quiesce = 1; 207 } 208 209 /* 210 * Note a context switch. This is a quiescent state for RCU-sched, 211 * and requires special handling for preemptible RCU. 212 * The caller must have disabled preemption. 213 */ 214 void rcu_note_context_switch(int cpu) 215 { 216 trace_rcu_utilization(TPS("Start context switch")); 217 rcu_sched_qs(cpu); 218 rcu_preempt_note_context_switch(cpu); 219 trace_rcu_utilization(TPS("End context switch")); 220 } 221 EXPORT_SYMBOL_GPL(rcu_note_context_switch); 222 223 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { 224 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, 225 .dynticks = ATOMIC_INIT(1), 226 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE 227 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE, 228 .dynticks_idle = ATOMIC_INIT(1), 229 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ 230 }; 231 232 static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 233 static long qhimark = 10000; /* If this many pending, ignore blimit. */ 234 static long qlowmark = 100; /* Once only this many pending, use blimit. */ 235 236 module_param(blimit, long, 0444); 237 module_param(qhimark, long, 0444); 238 module_param(qlowmark, long, 0444); 239 240 static ulong jiffies_till_first_fqs = ULONG_MAX; 241 static ulong jiffies_till_next_fqs = ULONG_MAX; 242 243 module_param(jiffies_till_first_fqs, ulong, 0644); 244 module_param(jiffies_till_next_fqs, ulong, 0644); 245 246 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 247 struct rcu_data *rdp); 248 static void force_qs_rnp(struct rcu_state *rsp, 249 int (*f)(struct rcu_data *rsp, bool *isidle, 250 unsigned long *maxj), 251 bool *isidle, unsigned long *maxj); 252 static void force_quiescent_state(struct rcu_state *rsp); 253 static int rcu_pending(int cpu); 254 255 /* 256 * Return the number of RCU-sched batches processed thus far for debug & stats. 257 */ 258 long rcu_batches_completed_sched(void) 259 { 260 return rcu_sched_state.completed; 261 } 262 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); 263 264 /* 265 * Return the number of RCU BH batches processed thus far for debug & stats. 266 */ 267 long rcu_batches_completed_bh(void) 268 { 269 return rcu_bh_state.completed; 270 } 271 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); 272 273 /* 274 * Force a quiescent state. 275 */ 276 void rcu_force_quiescent_state(void) 277 { 278 force_quiescent_state(rcu_state_p); 279 } 280 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 281 282 /* 283 * Force a quiescent state for RCU BH. 284 */ 285 void rcu_bh_force_quiescent_state(void) 286 { 287 force_quiescent_state(&rcu_bh_state); 288 } 289 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); 290 291 /* 292 * Show the state of the grace-period kthreads. 293 */ 294 void show_rcu_gp_kthreads(void) 295 { 296 struct rcu_state *rsp; 297 298 for_each_rcu_flavor(rsp) { 299 pr_info("%s: wait state: %d ->state: %#lx\n", 300 rsp->name, rsp->gp_state, rsp->gp_kthread->state); 301 /* sched_show_task(rsp->gp_kthread); */ 302 } 303 } 304 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); 305 306 /* 307 * Record the number of times rcutorture tests have been initiated and 308 * terminated. This information allows the debugfs tracing stats to be 309 * correlated to the rcutorture messages, even when the rcutorture module 310 * is being repeatedly loaded and unloaded. In other words, we cannot 311 * store this state in rcutorture itself. 312 */ 313 void rcutorture_record_test_transition(void) 314 { 315 rcutorture_testseq++; 316 rcutorture_vernum = 0; 317 } 318 EXPORT_SYMBOL_GPL(rcutorture_record_test_transition); 319 320 /* 321 * Send along grace-period-related data for rcutorture diagnostics. 322 */ 323 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 324 unsigned long *gpnum, unsigned long *completed) 325 { 326 struct rcu_state *rsp = NULL; 327 328 switch (test_type) { 329 case RCU_FLAVOR: 330 rsp = rcu_state_p; 331 break; 332 case RCU_BH_FLAVOR: 333 rsp = &rcu_bh_state; 334 break; 335 case RCU_SCHED_FLAVOR: 336 rsp = &rcu_sched_state; 337 break; 338 default: 339 break; 340 } 341 if (rsp != NULL) { 342 *flags = ACCESS_ONCE(rsp->gp_flags); 343 *gpnum = ACCESS_ONCE(rsp->gpnum); 344 *completed = ACCESS_ONCE(rsp->completed); 345 return; 346 } 347 *flags = 0; 348 *gpnum = 0; 349 *completed = 0; 350 } 351 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 352 353 /* 354 * Record the number of writer passes through the current rcutorture test. 355 * This is also used to correlate debugfs tracing stats with the rcutorture 356 * messages. 357 */ 358 void rcutorture_record_progress(unsigned long vernum) 359 { 360 rcutorture_vernum++; 361 } 362 EXPORT_SYMBOL_GPL(rcutorture_record_progress); 363 364 /* 365 * Force a quiescent state for RCU-sched. 366 */ 367 void rcu_sched_force_quiescent_state(void) 368 { 369 force_quiescent_state(&rcu_sched_state); 370 } 371 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state); 372 373 /* 374 * Does the CPU have callbacks ready to be invoked? 375 */ 376 static int 377 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) 378 { 379 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] && 380 rdp->nxttail[RCU_DONE_TAIL] != NULL; 381 } 382 383 /* 384 * Return the root node of the specified rcu_state structure. 385 */ 386 static struct rcu_node *rcu_get_root(struct rcu_state *rsp) 387 { 388 return &rsp->node[0]; 389 } 390 391 /* 392 * Is there any need for future grace periods? 393 * Interrupts must be disabled. If the caller does not hold the root 394 * rnp_node structure's ->lock, the results are advisory only. 395 */ 396 static int rcu_future_needs_gp(struct rcu_state *rsp) 397 { 398 struct rcu_node *rnp = rcu_get_root(rsp); 399 int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1; 400 int *fp = &rnp->need_future_gp[idx]; 401 402 return ACCESS_ONCE(*fp); 403 } 404 405 /* 406 * Does the current CPU require a not-yet-started grace period? 407 * The caller must have disabled interrupts to prevent races with 408 * normal callback registry. 409 */ 410 static int 411 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) 412 { 413 int i; 414 415 if (rcu_gp_in_progress(rsp)) 416 return 0; /* No, a grace period is already in progress. */ 417 if (rcu_future_needs_gp(rsp)) 418 return 1; /* Yes, a no-CBs CPU needs one. */ 419 if (!rdp->nxttail[RCU_NEXT_TAIL]) 420 return 0; /* No, this is a no-CBs (or offline) CPU. */ 421 if (*rdp->nxttail[RCU_NEXT_READY_TAIL]) 422 return 1; /* Yes, this CPU has newly registered callbacks. */ 423 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) 424 if (rdp->nxttail[i - 1] != rdp->nxttail[i] && 425 ULONG_CMP_LT(ACCESS_ONCE(rsp->completed), 426 rdp->nxtcompleted[i])) 427 return 1; /* Yes, CBs for future grace period. */ 428 return 0; /* No grace period needed. */ 429 } 430 431 /* 432 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state 433 * 434 * If the new value of the ->dynticks_nesting counter now is zero, 435 * we really have entered idle, and must do the appropriate accounting. 436 * The caller must have disabled interrupts. 437 */ 438 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, 439 bool user) 440 { 441 struct rcu_state *rsp; 442 struct rcu_data *rdp; 443 444 trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting); 445 if (!user && !is_idle_task(current)) { 446 struct task_struct *idle __maybe_unused = 447 idle_task(smp_processor_id()); 448 449 trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0); 450 ftrace_dump(DUMP_ORIG); 451 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", 452 current->pid, current->comm, 453 idle->pid, idle->comm); /* must be idle task! */ 454 } 455 for_each_rcu_flavor(rsp) { 456 rdp = this_cpu_ptr(rsp->rda); 457 do_nocb_deferred_wakeup(rdp); 458 } 459 rcu_prepare_for_idle(smp_processor_id()); 460 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ 461 smp_mb__before_atomic(); /* See above. */ 462 atomic_inc(&rdtp->dynticks); 463 smp_mb__after_atomic(); /* Force ordering with next sojourn. */ 464 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); 465 466 /* 467 * It is illegal to enter an extended quiescent state while 468 * in an RCU read-side critical section. 469 */ 470 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), 471 "Illegal idle entry in RCU read-side critical section."); 472 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), 473 "Illegal idle entry in RCU-bh read-side critical section."); 474 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), 475 "Illegal idle entry in RCU-sched read-side critical section."); 476 } 477 478 /* 479 * Enter an RCU extended quiescent state, which can be either the 480 * idle loop or adaptive-tickless usermode execution. 481 */ 482 static void rcu_eqs_enter(bool user) 483 { 484 long long oldval; 485 struct rcu_dynticks *rdtp; 486 487 rdtp = this_cpu_ptr(&rcu_dynticks); 488 oldval = rdtp->dynticks_nesting; 489 WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); 490 if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) { 491 rdtp->dynticks_nesting = 0; 492 rcu_eqs_enter_common(rdtp, oldval, user); 493 } else { 494 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; 495 } 496 } 497 498 /** 499 * rcu_idle_enter - inform RCU that current CPU is entering idle 500 * 501 * Enter idle mode, in other words, -leave- the mode in which RCU 502 * read-side critical sections can occur. (Though RCU read-side 503 * critical sections can occur in irq handlers in idle, a possibility 504 * handled by irq_enter() and irq_exit().) 505 * 506 * We crowbar the ->dynticks_nesting field to zero to allow for 507 * the possibility of usermode upcalls having messed up our count 508 * of interrupt nesting level during the prior busy period. 509 */ 510 void rcu_idle_enter(void) 511 { 512 unsigned long flags; 513 514 local_irq_save(flags); 515 rcu_eqs_enter(false); 516 rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0); 517 local_irq_restore(flags); 518 } 519 EXPORT_SYMBOL_GPL(rcu_idle_enter); 520 521 #ifdef CONFIG_RCU_USER_QS 522 /** 523 * rcu_user_enter - inform RCU that we are resuming userspace. 524 * 525 * Enter RCU idle mode right before resuming userspace. No use of RCU 526 * is permitted between this call and rcu_user_exit(). This way the 527 * CPU doesn't need to maintain the tick for RCU maintenance purposes 528 * when the CPU runs in userspace. 529 */ 530 void rcu_user_enter(void) 531 { 532 rcu_eqs_enter(1); 533 } 534 #endif /* CONFIG_RCU_USER_QS */ 535 536 /** 537 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle 538 * 539 * Exit from an interrupt handler, which might possibly result in entering 540 * idle mode, in other words, leaving the mode in which read-side critical 541 * sections can occur. 542 * 543 * This code assumes that the idle loop never does anything that might 544 * result in unbalanced calls to irq_enter() and irq_exit(). If your 545 * architecture violates this assumption, RCU will give you what you 546 * deserve, good and hard. But very infrequently and irreproducibly. 547 * 548 * Use things like work queues to work around this limitation. 549 * 550 * You have been warned. 551 */ 552 void rcu_irq_exit(void) 553 { 554 unsigned long flags; 555 long long oldval; 556 struct rcu_dynticks *rdtp; 557 558 local_irq_save(flags); 559 rdtp = this_cpu_ptr(&rcu_dynticks); 560 oldval = rdtp->dynticks_nesting; 561 rdtp->dynticks_nesting--; 562 WARN_ON_ONCE(rdtp->dynticks_nesting < 0); 563 if (rdtp->dynticks_nesting) 564 trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting); 565 else 566 rcu_eqs_enter_common(rdtp, oldval, true); 567 rcu_sysidle_enter(rdtp, 1); 568 local_irq_restore(flags); 569 } 570 571 /* 572 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state 573 * 574 * If the new value of the ->dynticks_nesting counter was previously zero, 575 * we really have exited idle, and must do the appropriate accounting. 576 * The caller must have disabled interrupts. 577 */ 578 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, 579 int user) 580 { 581 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */ 582 atomic_inc(&rdtp->dynticks); 583 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ 584 smp_mb__after_atomic(); /* See above. */ 585 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); 586 rcu_cleanup_after_idle(smp_processor_id()); 587 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); 588 if (!user && !is_idle_task(current)) { 589 struct task_struct *idle __maybe_unused = 590 idle_task(smp_processor_id()); 591 592 trace_rcu_dyntick(TPS("Error on exit: not idle task"), 593 oldval, rdtp->dynticks_nesting); 594 ftrace_dump(DUMP_ORIG); 595 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", 596 current->pid, current->comm, 597 idle->pid, idle->comm); /* must be idle task! */ 598 } 599 } 600 601 /* 602 * Exit an RCU extended quiescent state, which can be either the 603 * idle loop or adaptive-tickless usermode execution. 604 */ 605 static void rcu_eqs_exit(bool user) 606 { 607 struct rcu_dynticks *rdtp; 608 long long oldval; 609 610 rdtp = this_cpu_ptr(&rcu_dynticks); 611 oldval = rdtp->dynticks_nesting; 612 WARN_ON_ONCE(oldval < 0); 613 if (oldval & DYNTICK_TASK_NEST_MASK) { 614 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE; 615 } else { 616 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; 617 rcu_eqs_exit_common(rdtp, oldval, user); 618 } 619 } 620 621 /** 622 * rcu_idle_exit - inform RCU that current CPU is leaving idle 623 * 624 * Exit idle mode, in other words, -enter- the mode in which RCU 625 * read-side critical sections can occur. 626 * 627 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to 628 * allow for the possibility of usermode upcalls messing up our count 629 * of interrupt nesting level during the busy period that is just 630 * now starting. 631 */ 632 void rcu_idle_exit(void) 633 { 634 unsigned long flags; 635 636 local_irq_save(flags); 637 rcu_eqs_exit(false); 638 rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0); 639 local_irq_restore(flags); 640 } 641 EXPORT_SYMBOL_GPL(rcu_idle_exit); 642 643 #ifdef CONFIG_RCU_USER_QS 644 /** 645 * rcu_user_exit - inform RCU that we are exiting userspace. 646 * 647 * Exit RCU idle mode while entering the kernel because it can 648 * run a RCU read side critical section anytime. 649 */ 650 void rcu_user_exit(void) 651 { 652 rcu_eqs_exit(1); 653 } 654 #endif /* CONFIG_RCU_USER_QS */ 655 656 /** 657 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle 658 * 659 * Enter an interrupt handler, which might possibly result in exiting 660 * idle mode, in other words, entering the mode in which read-side critical 661 * sections can occur. 662 * 663 * Note that the Linux kernel is fully capable of entering an interrupt 664 * handler that it never exits, for example when doing upcalls to 665 * user mode! This code assumes that the idle loop never does upcalls to 666 * user mode. If your architecture does do upcalls from the idle loop (or 667 * does anything else that results in unbalanced calls to the irq_enter() 668 * and irq_exit() functions), RCU will give you what you deserve, good 669 * and hard. But very infrequently and irreproducibly. 670 * 671 * Use things like work queues to work around this limitation. 672 * 673 * You have been warned. 674 */ 675 void rcu_irq_enter(void) 676 { 677 unsigned long flags; 678 struct rcu_dynticks *rdtp; 679 long long oldval; 680 681 local_irq_save(flags); 682 rdtp = this_cpu_ptr(&rcu_dynticks); 683 oldval = rdtp->dynticks_nesting; 684 rdtp->dynticks_nesting++; 685 WARN_ON_ONCE(rdtp->dynticks_nesting == 0); 686 if (oldval) 687 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting); 688 else 689 rcu_eqs_exit_common(rdtp, oldval, true); 690 rcu_sysidle_exit(rdtp, 1); 691 local_irq_restore(flags); 692 } 693 694 /** 695 * rcu_nmi_enter - inform RCU of entry to NMI context 696 * 697 * If the CPU was idle with dynamic ticks active, and there is no 698 * irq handler running, this updates rdtp->dynticks_nmi to let the 699 * RCU grace-period handling know that the CPU is active. 700 */ 701 void rcu_nmi_enter(void) 702 { 703 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 704 705 if (rdtp->dynticks_nmi_nesting == 0 && 706 (atomic_read(&rdtp->dynticks) & 0x1)) 707 return; 708 rdtp->dynticks_nmi_nesting++; 709 smp_mb__before_atomic(); /* Force delay from prior write. */ 710 atomic_inc(&rdtp->dynticks); 711 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ 712 smp_mb__after_atomic(); /* See above. */ 713 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); 714 } 715 716 /** 717 * rcu_nmi_exit - inform RCU of exit from NMI context 718 * 719 * If the CPU was idle with dynamic ticks active, and there is no 720 * irq handler running, this updates rdtp->dynticks_nmi to let the 721 * RCU grace-period handling know that the CPU is no longer active. 722 */ 723 void rcu_nmi_exit(void) 724 { 725 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); 726 727 if (rdtp->dynticks_nmi_nesting == 0 || 728 --rdtp->dynticks_nmi_nesting != 0) 729 return; 730 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ 731 smp_mb__before_atomic(); /* See above. */ 732 atomic_inc(&rdtp->dynticks); 733 smp_mb__after_atomic(); /* Force delay to next write. */ 734 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); 735 } 736 737 /** 738 * __rcu_is_watching - are RCU read-side critical sections safe? 739 * 740 * Return true if RCU is watching the running CPU, which means that 741 * this CPU can safely enter RCU read-side critical sections. Unlike 742 * rcu_is_watching(), the caller of __rcu_is_watching() must have at 743 * least disabled preemption. 744 */ 745 bool notrace __rcu_is_watching(void) 746 { 747 return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1; 748 } 749 750 /** 751 * rcu_is_watching - see if RCU thinks that the current CPU is idle 752 * 753 * If the current CPU is in its idle loop and is neither in an interrupt 754 * or NMI handler, return true. 755 */ 756 bool notrace rcu_is_watching(void) 757 { 758 int ret; 759 760 preempt_disable(); 761 ret = __rcu_is_watching(); 762 preempt_enable(); 763 return ret; 764 } 765 EXPORT_SYMBOL_GPL(rcu_is_watching); 766 767 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 768 769 /* 770 * Is the current CPU online? Disable preemption to avoid false positives 771 * that could otherwise happen due to the current CPU number being sampled, 772 * this task being preempted, its old CPU being taken offline, resuming 773 * on some other CPU, then determining that its old CPU is now offline. 774 * It is OK to use RCU on an offline processor during initial boot, hence 775 * the check for rcu_scheduler_fully_active. Note also that it is OK 776 * for a CPU coming online to use RCU for one jiffy prior to marking itself 777 * online in the cpu_online_mask. Similarly, it is OK for a CPU going 778 * offline to continue to use RCU for one jiffy after marking itself 779 * offline in the cpu_online_mask. This leniency is necessary given the 780 * non-atomic nature of the online and offline processing, for example, 781 * the fact that a CPU enters the scheduler after completing the CPU_DYING 782 * notifiers. 783 * 784 * This is also why RCU internally marks CPUs online during the 785 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase. 786 * 787 * Disable checking if in an NMI handler because we cannot safely report 788 * errors from NMI handlers anyway. 789 */ 790 bool rcu_lockdep_current_cpu_online(void) 791 { 792 struct rcu_data *rdp; 793 struct rcu_node *rnp; 794 bool ret; 795 796 if (in_nmi()) 797 return true; 798 preempt_disable(); 799 rdp = this_cpu_ptr(&rcu_sched_data); 800 rnp = rdp->mynode; 801 ret = (rdp->grpmask & rnp->qsmaskinit) || 802 !rcu_scheduler_fully_active; 803 preempt_enable(); 804 return ret; 805 } 806 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 807 808 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 809 810 /** 811 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle 812 * 813 * If the current CPU is idle or running at a first-level (not nested) 814 * interrupt from idle, return true. The caller must have at least 815 * disabled preemption. 816 */ 817 static int rcu_is_cpu_rrupt_from_idle(void) 818 { 819 return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1; 820 } 821 822 /* 823 * Snapshot the specified CPU's dynticks counter so that we can later 824 * credit them with an implicit quiescent state. Return 1 if this CPU 825 * is in dynticks idle mode, which is an extended quiescent state. 826 */ 827 static int dyntick_save_progress_counter(struct rcu_data *rdp, 828 bool *isidle, unsigned long *maxj) 829 { 830 rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); 831 rcu_sysidle_check_cpu(rdp, isidle, maxj); 832 if ((rdp->dynticks_snap & 0x1) == 0) { 833 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); 834 return 1; 835 } else { 836 return 0; 837 } 838 } 839 840 /* 841 * This function really isn't for public consumption, but RCU is special in 842 * that context switches can allow the state machine to make progress. 843 */ 844 extern void resched_cpu(int cpu); 845 846 /* 847 * Return true if the specified CPU has passed through a quiescent 848 * state by virtue of being in or having passed through an dynticks 849 * idle state since the last call to dyntick_save_progress_counter() 850 * for this same CPU, or by virtue of having been offline. 851 */ 852 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, 853 bool *isidle, unsigned long *maxj) 854 { 855 unsigned int curr; 856 unsigned int snap; 857 858 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); 859 snap = (unsigned int)rdp->dynticks_snap; 860 861 /* 862 * If the CPU passed through or entered a dynticks idle phase with 863 * no active irq/NMI handlers, then we can safely pretend that the CPU 864 * already acknowledged the request to pass through a quiescent 865 * state. Either way, that CPU cannot possibly be in an RCU 866 * read-side critical section that started before the beginning 867 * of the current RCU grace period. 868 */ 869 if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) { 870 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); 871 rdp->dynticks_fqs++; 872 return 1; 873 } 874 875 /* 876 * Check for the CPU being offline, but only if the grace period 877 * is old enough. We don't need to worry about the CPU changing 878 * state: If we see it offline even once, it has been through a 879 * quiescent state. 880 * 881 * The reason for insisting that the grace period be at least 882 * one jiffy old is that CPUs that are not quite online and that 883 * have just gone offline can still execute RCU read-side critical 884 * sections. 885 */ 886 if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies)) 887 return 0; /* Grace period is not old enough. */ 888 barrier(); 889 if (cpu_is_offline(rdp->cpu)) { 890 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl")); 891 rdp->offline_fqs++; 892 return 1; 893 } 894 895 /* 896 * There is a possibility that a CPU in adaptive-ticks state 897 * might run in the kernel with the scheduling-clock tick disabled 898 * for an extended time period. Invoke rcu_kick_nohz_cpu() to 899 * force the CPU to restart the scheduling-clock tick in this 900 * CPU is in this state. 901 */ 902 rcu_kick_nohz_cpu(rdp->cpu); 903 904 /* 905 * Alternatively, the CPU might be running in the kernel 906 * for an extended period of time without a quiescent state. 907 * Attempt to force the CPU through the scheduler to gain the 908 * needed quiescent state, but only if the grace period has gone 909 * on for an uncommonly long time. If there are many stuck CPUs, 910 * we will beat on the first one until it gets unstuck, then move 911 * to the next. Only do this for the primary flavor of RCU. 912 */ 913 if (rdp->rsp == rcu_state_p && 914 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { 915 rdp->rsp->jiffies_resched += 5; 916 resched_cpu(rdp->cpu); 917 } 918 919 return 0; 920 } 921 922 static void record_gp_stall_check_time(struct rcu_state *rsp) 923 { 924 unsigned long j = jiffies; 925 unsigned long j1; 926 927 rsp->gp_start = j; 928 smp_wmb(); /* Record start time before stall time. */ 929 j1 = rcu_jiffies_till_stall_check(); 930 ACCESS_ONCE(rsp->jiffies_stall) = j + j1; 931 rsp->jiffies_resched = j + j1 / 2; 932 } 933 934 /* 935 * Dump stacks of all tasks running on stalled CPUs. This is a fallback 936 * for architectures that do not implement trigger_all_cpu_backtrace(). 937 * The NMI-triggered stack traces are more accurate because they are 938 * printed by the target CPU. 939 */ 940 static void rcu_dump_cpu_stacks(struct rcu_state *rsp) 941 { 942 int cpu; 943 unsigned long flags; 944 struct rcu_node *rnp; 945 946 rcu_for_each_leaf_node(rsp, rnp) { 947 raw_spin_lock_irqsave(&rnp->lock, flags); 948 if (rnp->qsmask != 0) { 949 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) 950 if (rnp->qsmask & (1UL << cpu)) 951 dump_cpu_task(rnp->grplo + cpu); 952 } 953 raw_spin_unlock_irqrestore(&rnp->lock, flags); 954 } 955 } 956 957 static void print_other_cpu_stall(struct rcu_state *rsp) 958 { 959 int cpu; 960 long delta; 961 unsigned long flags; 962 int ndetected = 0; 963 struct rcu_node *rnp = rcu_get_root(rsp); 964 long totqlen = 0; 965 966 /* Only let one CPU complain about others per time interval. */ 967 968 raw_spin_lock_irqsave(&rnp->lock, flags); 969 delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall); 970 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { 971 raw_spin_unlock_irqrestore(&rnp->lock, flags); 972 return; 973 } 974 ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; 975 raw_spin_unlock_irqrestore(&rnp->lock, flags); 976 977 /* 978 * OK, time to rat on our buddy... 979 * See Documentation/RCU/stallwarn.txt for info on how to debug 980 * RCU CPU stall warnings. 981 */ 982 pr_err("INFO: %s detected stalls on CPUs/tasks:", 983 rsp->name); 984 print_cpu_stall_info_begin(); 985 rcu_for_each_leaf_node(rsp, rnp) { 986 raw_spin_lock_irqsave(&rnp->lock, flags); 987 ndetected += rcu_print_task_stall(rnp); 988 if (rnp->qsmask != 0) { 989 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) 990 if (rnp->qsmask & (1UL << cpu)) { 991 print_cpu_stall_info(rsp, 992 rnp->grplo + cpu); 993 ndetected++; 994 } 995 } 996 raw_spin_unlock_irqrestore(&rnp->lock, flags); 997 } 998 999 /* 1000 * Now rat on any tasks that got kicked up to the root rcu_node 1001 * due to CPU offlining. 1002 */ 1003 rnp = rcu_get_root(rsp); 1004 raw_spin_lock_irqsave(&rnp->lock, flags); 1005 ndetected += rcu_print_task_stall(rnp); 1006 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1007 1008 print_cpu_stall_info_end(); 1009 for_each_possible_cpu(cpu) 1010 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; 1011 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n", 1012 smp_processor_id(), (long)(jiffies - rsp->gp_start), 1013 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1014 if (ndetected == 0) 1015 pr_err("INFO: Stall ended before state dump start\n"); 1016 else if (!trigger_all_cpu_backtrace()) 1017 rcu_dump_cpu_stacks(rsp); 1018 1019 /* Complain about tasks blocking the grace period. */ 1020 1021 rcu_print_detail_task_stall(rsp); 1022 1023 force_quiescent_state(rsp); /* Kick them all. */ 1024 } 1025 1026 static void print_cpu_stall(struct rcu_state *rsp) 1027 { 1028 int cpu; 1029 unsigned long flags; 1030 struct rcu_node *rnp = rcu_get_root(rsp); 1031 long totqlen = 0; 1032 1033 /* 1034 * OK, time to rat on ourselves... 1035 * See Documentation/RCU/stallwarn.txt for info on how to debug 1036 * RCU CPU stall warnings. 1037 */ 1038 pr_err("INFO: %s self-detected stall on CPU", rsp->name); 1039 print_cpu_stall_info_begin(); 1040 print_cpu_stall_info(rsp, smp_processor_id()); 1041 print_cpu_stall_info_end(); 1042 for_each_possible_cpu(cpu) 1043 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; 1044 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n", 1045 jiffies - rsp->gp_start, 1046 (long)rsp->gpnum, (long)rsp->completed, totqlen); 1047 if (!trigger_all_cpu_backtrace()) 1048 dump_stack(); 1049 1050 raw_spin_lock_irqsave(&rnp->lock, flags); 1051 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall))) 1052 ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 1053 3 * rcu_jiffies_till_stall_check() + 3; 1054 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1055 1056 /* 1057 * Attempt to revive the RCU machinery by forcing a context switch. 1058 * 1059 * A context switch would normally allow the RCU state machine to make 1060 * progress and it could be we're stuck in kernel space without context 1061 * switches for an entirely unreasonable amount of time. 1062 */ 1063 resched_cpu(smp_processor_id()); 1064 } 1065 1066 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) 1067 { 1068 unsigned long completed; 1069 unsigned long gpnum; 1070 unsigned long gps; 1071 unsigned long j; 1072 unsigned long js; 1073 struct rcu_node *rnp; 1074 1075 if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp)) 1076 return; 1077 j = jiffies; 1078 1079 /* 1080 * Lots of memory barriers to reject false positives. 1081 * 1082 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall, 1083 * then rsp->gp_start, and finally rsp->completed. These values 1084 * are updated in the opposite order with memory barriers (or 1085 * equivalent) during grace-period initialization and cleanup. 1086 * Now, a false positive can occur if we get an new value of 1087 * rsp->gp_start and a old value of rsp->jiffies_stall. But given 1088 * the memory barriers, the only way that this can happen is if one 1089 * grace period ends and another starts between these two fetches. 1090 * Detect this by comparing rsp->completed with the previous fetch 1091 * from rsp->gpnum. 1092 * 1093 * Given this check, comparisons of jiffies, rsp->jiffies_stall, 1094 * and rsp->gp_start suffice to forestall false positives. 1095 */ 1096 gpnum = ACCESS_ONCE(rsp->gpnum); 1097 smp_rmb(); /* Pick up ->gpnum first... */ 1098 js = ACCESS_ONCE(rsp->jiffies_stall); 1099 smp_rmb(); /* ...then ->jiffies_stall before the rest... */ 1100 gps = ACCESS_ONCE(rsp->gp_start); 1101 smp_rmb(); /* ...and finally ->gp_start before ->completed. */ 1102 completed = ACCESS_ONCE(rsp->completed); 1103 if (ULONG_CMP_GE(completed, gpnum) || 1104 ULONG_CMP_LT(j, js) || 1105 ULONG_CMP_GE(gps, js)) 1106 return; /* No stall or GP completed since entering function. */ 1107 rnp = rdp->mynode; 1108 if (rcu_gp_in_progress(rsp) && 1109 (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) { 1110 1111 /* We haven't checked in, so go dump stack. */ 1112 print_cpu_stall(rsp); 1113 1114 } else if (rcu_gp_in_progress(rsp) && 1115 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) { 1116 1117 /* They had a few time units to dump stack, so complain. */ 1118 print_other_cpu_stall(rsp); 1119 } 1120 } 1121 1122 /** 1123 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period 1124 * 1125 * Set the stall-warning timeout way off into the future, thus preventing 1126 * any RCU CPU stall-warning messages from appearing in the current set of 1127 * RCU grace periods. 1128 * 1129 * The caller must disable hard irqs. 1130 */ 1131 void rcu_cpu_stall_reset(void) 1132 { 1133 struct rcu_state *rsp; 1134 1135 for_each_rcu_flavor(rsp) 1136 ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2; 1137 } 1138 1139 /* 1140 * Initialize the specified rcu_data structure's callback list to empty. 1141 */ 1142 static void init_callback_list(struct rcu_data *rdp) 1143 { 1144 int i; 1145 1146 if (init_nocb_callback_list(rdp)) 1147 return; 1148 rdp->nxtlist = NULL; 1149 for (i = 0; i < RCU_NEXT_SIZE; i++) 1150 rdp->nxttail[i] = &rdp->nxtlist; 1151 } 1152 1153 /* 1154 * Determine the value that ->completed will have at the end of the 1155 * next subsequent grace period. This is used to tag callbacks so that 1156 * a CPU can invoke callbacks in a timely fashion even if that CPU has 1157 * been dyntick-idle for an extended period with callbacks under the 1158 * influence of RCU_FAST_NO_HZ. 1159 * 1160 * The caller must hold rnp->lock with interrupts disabled. 1161 */ 1162 static unsigned long rcu_cbs_completed(struct rcu_state *rsp, 1163 struct rcu_node *rnp) 1164 { 1165 /* 1166 * If RCU is idle, we just wait for the next grace period. 1167 * But we can only be sure that RCU is idle if we are looking 1168 * at the root rcu_node structure -- otherwise, a new grace 1169 * period might have started, but just not yet gotten around 1170 * to initializing the current non-root rcu_node structure. 1171 */ 1172 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed) 1173 return rnp->completed + 1; 1174 1175 /* 1176 * Otherwise, wait for a possible partial grace period and 1177 * then the subsequent full grace period. 1178 */ 1179 return rnp->completed + 2; 1180 } 1181 1182 /* 1183 * Trace-event helper function for rcu_start_future_gp() and 1184 * rcu_nocb_wait_gp(). 1185 */ 1186 static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, 1187 unsigned long c, const char *s) 1188 { 1189 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum, 1190 rnp->completed, c, rnp->level, 1191 rnp->grplo, rnp->grphi, s); 1192 } 1193 1194 /* 1195 * Start some future grace period, as needed to handle newly arrived 1196 * callbacks. The required future grace periods are recorded in each 1197 * rcu_node structure's ->need_future_gp field. Returns true if there 1198 * is reason to awaken the grace-period kthread. 1199 * 1200 * The caller must hold the specified rcu_node structure's ->lock. 1201 */ 1202 static bool __maybe_unused 1203 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, 1204 unsigned long *c_out) 1205 { 1206 unsigned long c; 1207 int i; 1208 bool ret = false; 1209 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp); 1210 1211 /* 1212 * Pick up grace-period number for new callbacks. If this 1213 * grace period is already marked as needed, return to the caller. 1214 */ 1215 c = rcu_cbs_completed(rdp->rsp, rnp); 1216 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf")); 1217 if (rnp->need_future_gp[c & 0x1]) { 1218 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf")); 1219 goto out; 1220 } 1221 1222 /* 1223 * If either this rcu_node structure or the root rcu_node structure 1224 * believe that a grace period is in progress, then we must wait 1225 * for the one following, which is in "c". Because our request 1226 * will be noticed at the end of the current grace period, we don't 1227 * need to explicitly start one. 1228 */ 1229 if (rnp->gpnum != rnp->completed || 1230 ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) { 1231 rnp->need_future_gp[c & 0x1]++; 1232 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf")); 1233 goto out; 1234 } 1235 1236 /* 1237 * There might be no grace period in progress. If we don't already 1238 * hold it, acquire the root rcu_node structure's lock in order to 1239 * start one (if needed). 1240 */ 1241 if (rnp != rnp_root) { 1242 raw_spin_lock(&rnp_root->lock); 1243 smp_mb__after_unlock_lock(); 1244 } 1245 1246 /* 1247 * Get a new grace-period number. If there really is no grace 1248 * period in progress, it will be smaller than the one we obtained 1249 * earlier. Adjust callbacks as needed. Note that even no-CBs 1250 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed. 1251 */ 1252 c = rcu_cbs_completed(rdp->rsp, rnp_root); 1253 for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++) 1254 if (ULONG_CMP_LT(c, rdp->nxtcompleted[i])) 1255 rdp->nxtcompleted[i] = c; 1256 1257 /* 1258 * If the needed for the required grace period is already 1259 * recorded, trace and leave. 1260 */ 1261 if (rnp_root->need_future_gp[c & 0x1]) { 1262 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot")); 1263 goto unlock_out; 1264 } 1265 1266 /* Record the need for the future grace period. */ 1267 rnp_root->need_future_gp[c & 0x1]++; 1268 1269 /* If a grace period is not already in progress, start one. */ 1270 if (rnp_root->gpnum != rnp_root->completed) { 1271 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot")); 1272 } else { 1273 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot")); 1274 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp); 1275 } 1276 unlock_out: 1277 if (rnp != rnp_root) 1278 raw_spin_unlock(&rnp_root->lock); 1279 out: 1280 if (c_out != NULL) 1281 *c_out = c; 1282 return ret; 1283 } 1284 1285 /* 1286 * Clean up any old requests for the just-ended grace period. Also return 1287 * whether any additional grace periods have been requested. Also invoke 1288 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads 1289 * waiting for this grace period to complete. 1290 */ 1291 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) 1292 { 1293 int c = rnp->completed; 1294 int needmore; 1295 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); 1296 1297 rcu_nocb_gp_cleanup(rsp, rnp); 1298 rnp->need_future_gp[c & 0x1] = 0; 1299 needmore = rnp->need_future_gp[(c + 1) & 0x1]; 1300 trace_rcu_future_gp(rnp, rdp, c, 1301 needmore ? TPS("CleanupMore") : TPS("Cleanup")); 1302 return needmore; 1303 } 1304 1305 /* 1306 * Awaken the grace-period kthread for the specified flavor of RCU. 1307 * Don't do a self-awaken, and don't bother awakening when there is 1308 * nothing for the grace-period kthread to do (as in several CPUs 1309 * raced to awaken, and we lost), and finally don't try to awaken 1310 * a kthread that has not yet been created. 1311 */ 1312 static void rcu_gp_kthread_wake(struct rcu_state *rsp) 1313 { 1314 if (current == rsp->gp_kthread || 1315 !ACCESS_ONCE(rsp->gp_flags) || 1316 !rsp->gp_kthread) 1317 return; 1318 wake_up(&rsp->gp_wq); 1319 } 1320 1321 /* 1322 * If there is room, assign a ->completed number to any callbacks on 1323 * this CPU that have not already been assigned. Also accelerate any 1324 * callbacks that were previously assigned a ->completed number that has 1325 * since proven to be too conservative, which can happen if callbacks get 1326 * assigned a ->completed number while RCU is idle, but with reference to 1327 * a non-root rcu_node structure. This function is idempotent, so it does 1328 * not hurt to call it repeatedly. Returns an flag saying that we should 1329 * awaken the RCU grace-period kthread. 1330 * 1331 * The caller must hold rnp->lock with interrupts disabled. 1332 */ 1333 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, 1334 struct rcu_data *rdp) 1335 { 1336 unsigned long c; 1337 int i; 1338 bool ret; 1339 1340 /* If the CPU has no callbacks, nothing to do. */ 1341 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) 1342 return false; 1343 1344 /* 1345 * Starting from the sublist containing the callbacks most 1346 * recently assigned a ->completed number and working down, find the 1347 * first sublist that is not assignable to an upcoming grace period. 1348 * Such a sublist has something in it (first two tests) and has 1349 * a ->completed number assigned that will complete sooner than 1350 * the ->completed number for newly arrived callbacks (last test). 1351 * 1352 * The key point is that any later sublist can be assigned the 1353 * same ->completed number as the newly arrived callbacks, which 1354 * means that the callbacks in any of these later sublist can be 1355 * grouped into a single sublist, whether or not they have already 1356 * been assigned a ->completed number. 1357 */ 1358 c = rcu_cbs_completed(rsp, rnp); 1359 for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--) 1360 if (rdp->nxttail[i] != rdp->nxttail[i - 1] && 1361 !ULONG_CMP_GE(rdp->nxtcompleted[i], c)) 1362 break; 1363 1364 /* 1365 * If there are no sublist for unassigned callbacks, leave. 1366 * At the same time, advance "i" one sublist, so that "i" will 1367 * index into the sublist where all the remaining callbacks should 1368 * be grouped into. 1369 */ 1370 if (++i >= RCU_NEXT_TAIL) 1371 return false; 1372 1373 /* 1374 * Assign all subsequent callbacks' ->completed number to the next 1375 * full grace period and group them all in the sublist initially 1376 * indexed by "i". 1377 */ 1378 for (; i <= RCU_NEXT_TAIL; i++) { 1379 rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL]; 1380 rdp->nxtcompleted[i] = c; 1381 } 1382 /* Record any needed additional grace periods. */ 1383 ret = rcu_start_future_gp(rnp, rdp, NULL); 1384 1385 /* Trace depending on how much we were able to accelerate. */ 1386 if (!*rdp->nxttail[RCU_WAIT_TAIL]) 1387 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB")); 1388 else 1389 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB")); 1390 return ret; 1391 } 1392 1393 /* 1394 * Move any callbacks whose grace period has completed to the 1395 * RCU_DONE_TAIL sublist, then compact the remaining sublists and 1396 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL 1397 * sublist. This function is idempotent, so it does not hurt to 1398 * invoke it repeatedly. As long as it is not invoked -too- often... 1399 * Returns true if the RCU grace-period kthread needs to be awakened. 1400 * 1401 * The caller must hold rnp->lock with interrupts disabled. 1402 */ 1403 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, 1404 struct rcu_data *rdp) 1405 { 1406 int i, j; 1407 1408 /* If the CPU has no callbacks, nothing to do. */ 1409 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL]) 1410 return false; 1411 1412 /* 1413 * Find all callbacks whose ->completed numbers indicate that they 1414 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist. 1415 */ 1416 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) { 1417 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i])) 1418 break; 1419 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i]; 1420 } 1421 /* Clean up any sublist tail pointers that were misordered above. */ 1422 for (j = RCU_WAIT_TAIL; j < i; j++) 1423 rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL]; 1424 1425 /* Copy down callbacks to fill in empty sublists. */ 1426 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) { 1427 if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL]) 1428 break; 1429 rdp->nxttail[j] = rdp->nxttail[i]; 1430 rdp->nxtcompleted[j] = rdp->nxtcompleted[i]; 1431 } 1432 1433 /* Classify any remaining callbacks. */ 1434 return rcu_accelerate_cbs(rsp, rnp, rdp); 1435 } 1436 1437 /* 1438 * Update CPU-local rcu_data state to record the beginnings and ends of 1439 * grace periods. The caller must hold the ->lock of the leaf rcu_node 1440 * structure corresponding to the current CPU, and must have irqs disabled. 1441 * Returns true if the grace-period kthread needs to be awakened. 1442 */ 1443 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, 1444 struct rcu_data *rdp) 1445 { 1446 bool ret; 1447 1448 /* Handle the ends of any preceding grace periods first. */ 1449 if (rdp->completed == rnp->completed) { 1450 1451 /* No grace period end, so just accelerate recent callbacks. */ 1452 ret = rcu_accelerate_cbs(rsp, rnp, rdp); 1453 1454 } else { 1455 1456 /* Advance callbacks. */ 1457 ret = rcu_advance_cbs(rsp, rnp, rdp); 1458 1459 /* Remember that we saw this grace-period completion. */ 1460 rdp->completed = rnp->completed; 1461 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); 1462 } 1463 1464 if (rdp->gpnum != rnp->gpnum) { 1465 /* 1466 * If the current grace period is waiting for this CPU, 1467 * set up to detect a quiescent state, otherwise don't 1468 * go looking for one. 1469 */ 1470 rdp->gpnum = rnp->gpnum; 1471 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart")); 1472 rdp->passed_quiesce = 0; 1473 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); 1474 zero_cpu_stall_ticks(rdp); 1475 } 1476 return ret; 1477 } 1478 1479 static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) 1480 { 1481 unsigned long flags; 1482 bool needwake; 1483 struct rcu_node *rnp; 1484 1485 local_irq_save(flags); 1486 rnp = rdp->mynode; 1487 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && 1488 rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */ 1489 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ 1490 local_irq_restore(flags); 1491 return; 1492 } 1493 smp_mb__after_unlock_lock(); 1494 needwake = __note_gp_changes(rsp, rnp, rdp); 1495 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1496 if (needwake) 1497 rcu_gp_kthread_wake(rsp); 1498 } 1499 1500 /* 1501 * Initialize a new grace period. Return 0 if no grace period required. 1502 */ 1503 static int rcu_gp_init(struct rcu_state *rsp) 1504 { 1505 struct rcu_data *rdp; 1506 struct rcu_node *rnp = rcu_get_root(rsp); 1507 1508 rcu_bind_gp_kthread(); 1509 raw_spin_lock_irq(&rnp->lock); 1510 smp_mb__after_unlock_lock(); 1511 if (!ACCESS_ONCE(rsp->gp_flags)) { 1512 /* Spurious wakeup, tell caller to go back to sleep. */ 1513 raw_spin_unlock_irq(&rnp->lock); 1514 return 0; 1515 } 1516 ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */ 1517 1518 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) { 1519 /* 1520 * Grace period already in progress, don't start another. 1521 * Not supposed to be able to happen. 1522 */ 1523 raw_spin_unlock_irq(&rnp->lock); 1524 return 0; 1525 } 1526 1527 /* Advance to a new grace period and initialize state. */ 1528 record_gp_stall_check_time(rsp); 1529 /* Record GP times before starting GP, hence smp_store_release(). */ 1530 smp_store_release(&rsp->gpnum, rsp->gpnum + 1); 1531 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start")); 1532 raw_spin_unlock_irq(&rnp->lock); 1533 1534 /* Exclude any concurrent CPU-hotplug operations. */ 1535 mutex_lock(&rsp->onoff_mutex); 1536 smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */ 1537 1538 /* 1539 * Set the quiescent-state-needed bits in all the rcu_node 1540 * structures for all currently online CPUs in breadth-first order, 1541 * starting from the root rcu_node structure, relying on the layout 1542 * of the tree within the rsp->node[] array. Note that other CPUs 1543 * will access only the leaves of the hierarchy, thus seeing that no 1544 * grace period is in progress, at least until the corresponding 1545 * leaf node has been initialized. In addition, we have excluded 1546 * CPU-hotplug operations. 1547 * 1548 * The grace period cannot complete until the initialization 1549 * process finishes, because this kthread handles both. 1550 */ 1551 rcu_for_each_node_breadth_first(rsp, rnp) { 1552 raw_spin_lock_irq(&rnp->lock); 1553 smp_mb__after_unlock_lock(); 1554 rdp = this_cpu_ptr(rsp->rda); 1555 rcu_preempt_check_blocked_tasks(rnp); 1556 rnp->qsmask = rnp->qsmaskinit; 1557 ACCESS_ONCE(rnp->gpnum) = rsp->gpnum; 1558 WARN_ON_ONCE(rnp->completed != rsp->completed); 1559 ACCESS_ONCE(rnp->completed) = rsp->completed; 1560 if (rnp == rdp->mynode) 1561 (void)__note_gp_changes(rsp, rnp, rdp); 1562 rcu_preempt_boost_start_gp(rnp); 1563 trace_rcu_grace_period_init(rsp->name, rnp->gpnum, 1564 rnp->level, rnp->grplo, 1565 rnp->grphi, rnp->qsmask); 1566 raw_spin_unlock_irq(&rnp->lock); 1567 #ifdef CONFIG_PROVE_RCU_DELAY 1568 if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 && 1569 system_state == SYSTEM_RUNNING) 1570 udelay(200); 1571 #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */ 1572 cond_resched(); 1573 } 1574 1575 mutex_unlock(&rsp->onoff_mutex); 1576 return 1; 1577 } 1578 1579 /* 1580 * Do one round of quiescent-state forcing. 1581 */ 1582 static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) 1583 { 1584 int fqs_state = fqs_state_in; 1585 bool isidle = false; 1586 unsigned long maxj; 1587 struct rcu_node *rnp = rcu_get_root(rsp); 1588 1589 rsp->n_force_qs++; 1590 if (fqs_state == RCU_SAVE_DYNTICK) { 1591 /* Collect dyntick-idle snapshots. */ 1592 if (is_sysidle_rcu_state(rsp)) { 1593 isidle = 1; 1594 maxj = jiffies - ULONG_MAX / 4; 1595 } 1596 force_qs_rnp(rsp, dyntick_save_progress_counter, 1597 &isidle, &maxj); 1598 rcu_sysidle_report_gp(rsp, isidle, maxj); 1599 fqs_state = RCU_FORCE_QS; 1600 } else { 1601 /* Handle dyntick-idle and offline CPUs. */ 1602 isidle = 0; 1603 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj); 1604 } 1605 /* Clear flag to prevent immediate re-entry. */ 1606 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { 1607 raw_spin_lock_irq(&rnp->lock); 1608 smp_mb__after_unlock_lock(); 1609 ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS; 1610 raw_spin_unlock_irq(&rnp->lock); 1611 } 1612 return fqs_state; 1613 } 1614 1615 /* 1616 * Clean up after the old grace period. 1617 */ 1618 static void rcu_gp_cleanup(struct rcu_state *rsp) 1619 { 1620 unsigned long gp_duration; 1621 bool needgp = false; 1622 int nocb = 0; 1623 struct rcu_data *rdp; 1624 struct rcu_node *rnp = rcu_get_root(rsp); 1625 1626 raw_spin_lock_irq(&rnp->lock); 1627 smp_mb__after_unlock_lock(); 1628 gp_duration = jiffies - rsp->gp_start; 1629 if (gp_duration > rsp->gp_max) 1630 rsp->gp_max = gp_duration; 1631 1632 /* 1633 * We know the grace period is complete, but to everyone else 1634 * it appears to still be ongoing. But it is also the case 1635 * that to everyone else it looks like there is nothing that 1636 * they can do to advance the grace period. It is therefore 1637 * safe for us to drop the lock in order to mark the grace 1638 * period as completed in all of the rcu_node structures. 1639 */ 1640 raw_spin_unlock_irq(&rnp->lock); 1641 1642 /* 1643 * Propagate new ->completed value to rcu_node structures so 1644 * that other CPUs don't have to wait until the start of the next 1645 * grace period to process their callbacks. This also avoids 1646 * some nasty RCU grace-period initialization races by forcing 1647 * the end of the current grace period to be completely recorded in 1648 * all of the rcu_node structures before the beginning of the next 1649 * grace period is recorded in any of the rcu_node structures. 1650 */ 1651 rcu_for_each_node_breadth_first(rsp, rnp) { 1652 raw_spin_lock_irq(&rnp->lock); 1653 smp_mb__after_unlock_lock(); 1654 ACCESS_ONCE(rnp->completed) = rsp->gpnum; 1655 rdp = this_cpu_ptr(rsp->rda); 1656 if (rnp == rdp->mynode) 1657 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; 1658 /* smp_mb() provided by prior unlock-lock pair. */ 1659 nocb += rcu_future_gp_cleanup(rsp, rnp); 1660 raw_spin_unlock_irq(&rnp->lock); 1661 cond_resched(); 1662 } 1663 rnp = rcu_get_root(rsp); 1664 raw_spin_lock_irq(&rnp->lock); 1665 smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */ 1666 rcu_nocb_gp_set(rnp, nocb); 1667 1668 /* Declare grace period done. */ 1669 ACCESS_ONCE(rsp->completed) = rsp->gpnum; 1670 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); 1671 rsp->fqs_state = RCU_GP_IDLE; 1672 rdp = this_cpu_ptr(rsp->rda); 1673 /* Advance CBs to reduce false positives below. */ 1674 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; 1675 if (needgp || cpu_needs_another_gp(rsp, rdp)) { 1676 ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT; 1677 trace_rcu_grace_period(rsp->name, 1678 ACCESS_ONCE(rsp->gpnum), 1679 TPS("newreq")); 1680 } 1681 raw_spin_unlock_irq(&rnp->lock); 1682 } 1683 1684 /* 1685 * Body of kthread that handles grace periods. 1686 */ 1687 static int __noreturn rcu_gp_kthread(void *arg) 1688 { 1689 int fqs_state; 1690 int gf; 1691 unsigned long j; 1692 int ret; 1693 struct rcu_state *rsp = arg; 1694 struct rcu_node *rnp = rcu_get_root(rsp); 1695 1696 for (;;) { 1697 1698 /* Handle grace-period start. */ 1699 for (;;) { 1700 trace_rcu_grace_period(rsp->name, 1701 ACCESS_ONCE(rsp->gpnum), 1702 TPS("reqwait")); 1703 rsp->gp_state = RCU_GP_WAIT_GPS; 1704 wait_event_interruptible(rsp->gp_wq, 1705 ACCESS_ONCE(rsp->gp_flags) & 1706 RCU_GP_FLAG_INIT); 1707 /* Locking provides needed memory barrier. */ 1708 if (rcu_gp_init(rsp)) 1709 break; 1710 cond_resched(); 1711 flush_signals(current); 1712 trace_rcu_grace_period(rsp->name, 1713 ACCESS_ONCE(rsp->gpnum), 1714 TPS("reqwaitsig")); 1715 } 1716 1717 /* Handle quiescent-state forcing. */ 1718 fqs_state = RCU_SAVE_DYNTICK; 1719 j = jiffies_till_first_fqs; 1720 if (j > HZ) { 1721 j = HZ; 1722 jiffies_till_first_fqs = HZ; 1723 } 1724 ret = 0; 1725 for (;;) { 1726 if (!ret) 1727 rsp->jiffies_force_qs = jiffies + j; 1728 trace_rcu_grace_period(rsp->name, 1729 ACCESS_ONCE(rsp->gpnum), 1730 TPS("fqswait")); 1731 rsp->gp_state = RCU_GP_WAIT_FQS; 1732 ret = wait_event_interruptible_timeout(rsp->gp_wq, 1733 ((gf = ACCESS_ONCE(rsp->gp_flags)) & 1734 RCU_GP_FLAG_FQS) || 1735 (!ACCESS_ONCE(rnp->qsmask) && 1736 !rcu_preempt_blocked_readers_cgp(rnp)), 1737 j); 1738 /* Locking provides needed memory barriers. */ 1739 /* If grace period done, leave loop. */ 1740 if (!ACCESS_ONCE(rnp->qsmask) && 1741 !rcu_preempt_blocked_readers_cgp(rnp)) 1742 break; 1743 /* If time for quiescent-state forcing, do it. */ 1744 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) || 1745 (gf & RCU_GP_FLAG_FQS)) { 1746 trace_rcu_grace_period(rsp->name, 1747 ACCESS_ONCE(rsp->gpnum), 1748 TPS("fqsstart")); 1749 fqs_state = rcu_gp_fqs(rsp, fqs_state); 1750 trace_rcu_grace_period(rsp->name, 1751 ACCESS_ONCE(rsp->gpnum), 1752 TPS("fqsend")); 1753 cond_resched(); 1754 } else { 1755 /* Deal with stray signal. */ 1756 cond_resched(); 1757 flush_signals(current); 1758 trace_rcu_grace_period(rsp->name, 1759 ACCESS_ONCE(rsp->gpnum), 1760 TPS("fqswaitsig")); 1761 } 1762 j = jiffies_till_next_fqs; 1763 if (j > HZ) { 1764 j = HZ; 1765 jiffies_till_next_fqs = HZ; 1766 } else if (j < 1) { 1767 j = 1; 1768 jiffies_till_next_fqs = 1; 1769 } 1770 } 1771 1772 /* Handle grace-period end. */ 1773 rcu_gp_cleanup(rsp); 1774 } 1775 } 1776 1777 /* 1778 * Start a new RCU grace period if warranted, re-initializing the hierarchy 1779 * in preparation for detecting the next grace period. The caller must hold 1780 * the root node's ->lock and hard irqs must be disabled. 1781 * 1782 * Note that it is legal for a dying CPU (which is marked as offline) to 1783 * invoke this function. This can happen when the dying CPU reports its 1784 * quiescent state. 1785 * 1786 * Returns true if the grace-period kthread must be awakened. 1787 */ 1788 static bool 1789 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 1790 struct rcu_data *rdp) 1791 { 1792 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) { 1793 /* 1794 * Either we have not yet spawned the grace-period 1795 * task, this CPU does not need another grace period, 1796 * or a grace period is already in progress. 1797 * Either way, don't start a new grace period. 1798 */ 1799 return false; 1800 } 1801 ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT; 1802 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), 1803 TPS("newreq")); 1804 1805 /* 1806 * We can't do wakeups while holding the rnp->lock, as that 1807 * could cause possible deadlocks with the rq->lock. Defer 1808 * the wakeup to our caller. 1809 */ 1810 return true; 1811 } 1812 1813 /* 1814 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's 1815 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it 1816 * is invoked indirectly from rcu_advance_cbs(), which would result in 1817 * endless recursion -- or would do so if it wasn't for the self-deadlock 1818 * that is encountered beforehand. 1819 * 1820 * Returns true if the grace-period kthread needs to be awakened. 1821 */ 1822 static bool rcu_start_gp(struct rcu_state *rsp) 1823 { 1824 struct rcu_data *rdp = this_cpu_ptr(rsp->rda); 1825 struct rcu_node *rnp = rcu_get_root(rsp); 1826 bool ret = false; 1827 1828 /* 1829 * If there is no grace period in progress right now, any 1830 * callbacks we have up to this point will be satisfied by the 1831 * next grace period. Also, advancing the callbacks reduces the 1832 * probability of false positives from cpu_needs_another_gp() 1833 * resulting in pointless grace periods. So, advance callbacks 1834 * then start the grace period! 1835 */ 1836 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret; 1837 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret; 1838 return ret; 1839 } 1840 1841 /* 1842 * Report a full set of quiescent states to the specified rcu_state 1843 * data structure. This involves cleaning up after the prior grace 1844 * period and letting rcu_start_gp() start up the next grace period 1845 * if one is needed. Note that the caller must hold rnp->lock, which 1846 * is released before return. 1847 */ 1848 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) 1849 __releases(rcu_get_root(rsp)->lock) 1850 { 1851 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 1852 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); 1853 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ 1854 } 1855 1856 /* 1857 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 1858 * Allows quiescent states for a group of CPUs to be reported at one go 1859 * to the specified rcu_node structure, though all the CPUs in the group 1860 * must be represented by the same rcu_node structure (which need not be 1861 * a leaf rcu_node structure, though it often will be). That structure's 1862 * lock must be held upon entry, and it is released before return. 1863 */ 1864 static void 1865 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, 1866 struct rcu_node *rnp, unsigned long flags) 1867 __releases(rnp->lock) 1868 { 1869 struct rcu_node *rnp_c; 1870 1871 /* Walk up the rcu_node hierarchy. */ 1872 for (;;) { 1873 if (!(rnp->qsmask & mask)) { 1874 1875 /* Our bit has already been cleared, so done. */ 1876 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1877 return; 1878 } 1879 rnp->qsmask &= ~mask; 1880 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum, 1881 mask, rnp->qsmask, rnp->level, 1882 rnp->grplo, rnp->grphi, 1883 !!rnp->gp_tasks); 1884 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 1885 1886 /* Other bits still set at this level, so done. */ 1887 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1888 return; 1889 } 1890 mask = rnp->grpmask; 1891 if (rnp->parent == NULL) { 1892 1893 /* No more levels. Exit loop holding root lock. */ 1894 1895 break; 1896 } 1897 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1898 rnp_c = rnp; 1899 rnp = rnp->parent; 1900 raw_spin_lock_irqsave(&rnp->lock, flags); 1901 smp_mb__after_unlock_lock(); 1902 WARN_ON_ONCE(rnp_c->qsmask); 1903 } 1904 1905 /* 1906 * Get here if we are the last CPU to pass through a quiescent 1907 * state for this grace period. Invoke rcu_report_qs_rsp() 1908 * to clean up and start the next grace period if one is needed. 1909 */ 1910 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ 1911 } 1912 1913 /* 1914 * Record a quiescent state for the specified CPU to that CPU's rcu_data 1915 * structure. This must be either called from the specified CPU, or 1916 * called when the specified CPU is known to be offline (and when it is 1917 * also known that no other CPU is concurrently trying to help the offline 1918 * CPU). The lastcomp argument is used to make sure we are still in the 1919 * grace period of interest. We don't want to end the current grace period 1920 * based on quiescent states detected in an earlier grace period! 1921 */ 1922 static void 1923 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) 1924 { 1925 unsigned long flags; 1926 unsigned long mask; 1927 bool needwake; 1928 struct rcu_node *rnp; 1929 1930 rnp = rdp->mynode; 1931 raw_spin_lock_irqsave(&rnp->lock, flags); 1932 smp_mb__after_unlock_lock(); 1933 if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || 1934 rnp->completed == rnp->gpnum) { 1935 1936 /* 1937 * The grace period in which this quiescent state was 1938 * recorded has ended, so don't report it upwards. 1939 * We will instead need a new quiescent state that lies 1940 * within the current grace period. 1941 */ 1942 rdp->passed_quiesce = 0; /* need qs for new gp. */ 1943 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1944 return; 1945 } 1946 mask = rdp->grpmask; 1947 if ((rnp->qsmask & mask) == 0) { 1948 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1949 } else { 1950 rdp->qs_pending = 0; 1951 1952 /* 1953 * This GP can't end until cpu checks in, so all of our 1954 * callbacks can be processed during the next GP. 1955 */ 1956 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); 1957 1958 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */ 1959 if (needwake) 1960 rcu_gp_kthread_wake(rsp); 1961 } 1962 } 1963 1964 /* 1965 * Check to see if there is a new grace period of which this CPU 1966 * is not yet aware, and if so, set up local rcu_data state for it. 1967 * Otherwise, see if this CPU has just passed through its first 1968 * quiescent state for this grace period, and record that fact if so. 1969 */ 1970 static void 1971 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) 1972 { 1973 /* Check for grace-period ends and beginnings. */ 1974 note_gp_changes(rsp, rdp); 1975 1976 /* 1977 * Does this CPU still need to do its part for current grace period? 1978 * If no, return and let the other CPUs do their part as well. 1979 */ 1980 if (!rdp->qs_pending) 1981 return; 1982 1983 /* 1984 * Was there a quiescent state since the beginning of the grace 1985 * period? If no, then exit and wait for the next call. 1986 */ 1987 if (!rdp->passed_quiesce) 1988 return; 1989 1990 /* 1991 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 1992 * judge of that). 1993 */ 1994 rcu_report_qs_rdp(rdp->cpu, rsp, rdp); 1995 } 1996 1997 #ifdef CONFIG_HOTPLUG_CPU 1998 1999 /* 2000 * Send the specified CPU's RCU callbacks to the orphanage. The 2001 * specified CPU must be offline, and the caller must hold the 2002 * ->orphan_lock. 2003 */ 2004 static void 2005 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, 2006 struct rcu_node *rnp, struct rcu_data *rdp) 2007 { 2008 /* No-CBs CPUs do not have orphanable callbacks. */ 2009 if (rcu_is_nocb_cpu(rdp->cpu)) 2010 return; 2011 2012 /* 2013 * Orphan the callbacks. First adjust the counts. This is safe 2014 * because _rcu_barrier() excludes CPU-hotplug operations, so it 2015 * cannot be running now. Thus no memory barrier is required. 2016 */ 2017 if (rdp->nxtlist != NULL) { 2018 rsp->qlen_lazy += rdp->qlen_lazy; 2019 rsp->qlen += rdp->qlen; 2020 rdp->n_cbs_orphaned += rdp->qlen; 2021 rdp->qlen_lazy = 0; 2022 ACCESS_ONCE(rdp->qlen) = 0; 2023 } 2024 2025 /* 2026 * Next, move those callbacks still needing a grace period to 2027 * the orphanage, where some other CPU will pick them up. 2028 * Some of the callbacks might have gone partway through a grace 2029 * period, but that is too bad. They get to start over because we 2030 * cannot assume that grace periods are synchronized across CPUs. 2031 * We don't bother updating the ->nxttail[] array yet, instead 2032 * we just reset the whole thing later on. 2033 */ 2034 if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) { 2035 *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL]; 2036 rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL]; 2037 *rdp->nxttail[RCU_DONE_TAIL] = NULL; 2038 } 2039 2040 /* 2041 * Then move the ready-to-invoke callbacks to the orphanage, 2042 * where some other CPU will pick them up. These will not be 2043 * required to pass though another grace period: They are done. 2044 */ 2045 if (rdp->nxtlist != NULL) { 2046 *rsp->orphan_donetail = rdp->nxtlist; 2047 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL]; 2048 } 2049 2050 /* Finally, initialize the rcu_data structure's list to empty. */ 2051 init_callback_list(rdp); 2052 } 2053 2054 /* 2055 * Adopt the RCU callbacks from the specified rcu_state structure's 2056 * orphanage. The caller must hold the ->orphan_lock. 2057 */ 2058 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags) 2059 { 2060 int i; 2061 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2062 2063 /* No-CBs CPUs are handled specially. */ 2064 if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags)) 2065 return; 2066 2067 /* Do the accounting first. */ 2068 rdp->qlen_lazy += rsp->qlen_lazy; 2069 rdp->qlen += rsp->qlen; 2070 rdp->n_cbs_adopted += rsp->qlen; 2071 if (rsp->qlen_lazy != rsp->qlen) 2072 rcu_idle_count_callbacks_posted(); 2073 rsp->qlen_lazy = 0; 2074 rsp->qlen = 0; 2075 2076 /* 2077 * We do not need a memory barrier here because the only way we 2078 * can get here if there is an rcu_barrier() in flight is if 2079 * we are the task doing the rcu_barrier(). 2080 */ 2081 2082 /* First adopt the ready-to-invoke callbacks. */ 2083 if (rsp->orphan_donelist != NULL) { 2084 *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL]; 2085 *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist; 2086 for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--) 2087 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL]) 2088 rdp->nxttail[i] = rsp->orphan_donetail; 2089 rsp->orphan_donelist = NULL; 2090 rsp->orphan_donetail = &rsp->orphan_donelist; 2091 } 2092 2093 /* And then adopt the callbacks that still need a grace period. */ 2094 if (rsp->orphan_nxtlist != NULL) { 2095 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist; 2096 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail; 2097 rsp->orphan_nxtlist = NULL; 2098 rsp->orphan_nxttail = &rsp->orphan_nxtlist; 2099 } 2100 } 2101 2102 /* 2103 * Trace the fact that this CPU is going offline. 2104 */ 2105 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) 2106 { 2107 RCU_TRACE(unsigned long mask); 2108 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda)); 2109 RCU_TRACE(struct rcu_node *rnp = rdp->mynode); 2110 2111 RCU_TRACE(mask = rdp->grpmask); 2112 trace_rcu_grace_period(rsp->name, 2113 rnp->gpnum + 1 - !!(rnp->qsmask & mask), 2114 TPS("cpuofl")); 2115 } 2116 2117 /* 2118 * The CPU has been completely removed, and some other CPU is reporting 2119 * this fact from process context. Do the remainder of the cleanup, 2120 * including orphaning the outgoing CPU's RCU callbacks, and also 2121 * adopting them. There can only be one CPU hotplug operation at a time, 2122 * so no other CPU can be attempting to update rcu_cpu_kthread_task. 2123 */ 2124 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2125 { 2126 unsigned long flags; 2127 unsigned long mask; 2128 int need_report = 0; 2129 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 2130 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2131 2132 /* Adjust any no-longer-needed kthreads. */ 2133 rcu_boost_kthread_setaffinity(rnp, -1); 2134 2135 /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */ 2136 2137 /* Exclude any attempts to start a new grace period. */ 2138 mutex_lock(&rsp->onoff_mutex); 2139 raw_spin_lock_irqsave(&rsp->orphan_lock, flags); 2140 2141 /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */ 2142 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); 2143 rcu_adopt_orphan_cbs(rsp, flags); 2144 2145 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ 2146 mask = rdp->grpmask; /* rnp->grplo is constant. */ 2147 do { 2148 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 2149 smp_mb__after_unlock_lock(); 2150 rnp->qsmaskinit &= ~mask; 2151 if (rnp->qsmaskinit != 0) { 2152 if (rnp != rdp->mynode) 2153 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 2154 break; 2155 } 2156 if (rnp == rdp->mynode) 2157 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); 2158 else 2159 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 2160 mask = rnp->grpmask; 2161 rnp = rnp->parent; 2162 } while (rnp != NULL); 2163 2164 /* 2165 * We still hold the leaf rcu_node structure lock here, and 2166 * irqs are still disabled. The reason for this subterfuge is 2167 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock 2168 * held leads to deadlock. 2169 */ 2170 raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */ 2171 rnp = rdp->mynode; 2172 if (need_report & RCU_OFL_TASKS_NORM_GP) 2173 rcu_report_unblock_qs_rnp(rnp, flags); 2174 else 2175 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2176 if (need_report & RCU_OFL_TASKS_EXP_GP) 2177 rcu_report_exp_rnp(rsp, rnp, true); 2178 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, 2179 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", 2180 cpu, rdp->qlen, rdp->nxtlist); 2181 init_callback_list(rdp); 2182 /* Disallow further callbacks on this CPU. */ 2183 rdp->nxttail[RCU_NEXT_TAIL] = NULL; 2184 mutex_unlock(&rsp->onoff_mutex); 2185 } 2186 2187 #else /* #ifdef CONFIG_HOTPLUG_CPU */ 2188 2189 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) 2190 { 2191 } 2192 2193 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) 2194 { 2195 } 2196 2197 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ 2198 2199 /* 2200 * Invoke any RCU callbacks that have made it to the end of their grace 2201 * period. Thottle as specified by rdp->blimit. 2202 */ 2203 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) 2204 { 2205 unsigned long flags; 2206 struct rcu_head *next, *list, **tail; 2207 long bl, count, count_lazy; 2208 int i; 2209 2210 /* If no callbacks are ready, just return. */ 2211 if (!cpu_has_callbacks_ready_to_invoke(rdp)) { 2212 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0); 2213 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), 2214 need_resched(), is_idle_task(current), 2215 rcu_is_callbacks_kthread()); 2216 return; 2217 } 2218 2219 /* 2220 * Extract the list of ready callbacks, disabling to prevent 2221 * races with call_rcu() from interrupt handlers. 2222 */ 2223 local_irq_save(flags); 2224 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2225 bl = rdp->blimit; 2226 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl); 2227 list = rdp->nxtlist; 2228 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL]; 2229 *rdp->nxttail[RCU_DONE_TAIL] = NULL; 2230 tail = rdp->nxttail[RCU_DONE_TAIL]; 2231 for (i = RCU_NEXT_SIZE - 1; i >= 0; i--) 2232 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL]) 2233 rdp->nxttail[i] = &rdp->nxtlist; 2234 local_irq_restore(flags); 2235 2236 /* Invoke callbacks. */ 2237 count = count_lazy = 0; 2238 while (list) { 2239 next = list->next; 2240 prefetch(next); 2241 debug_rcu_head_unqueue(list); 2242 if (__rcu_reclaim(rsp->name, list)) 2243 count_lazy++; 2244 list = next; 2245 /* Stop only if limit reached and CPU has something to do. */ 2246 if (++count >= bl && 2247 (need_resched() || 2248 (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) 2249 break; 2250 } 2251 2252 local_irq_save(flags); 2253 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(), 2254 is_idle_task(current), 2255 rcu_is_callbacks_kthread()); 2256 2257 /* Update count, and requeue any remaining callbacks. */ 2258 if (list != NULL) { 2259 *tail = rdp->nxtlist; 2260 rdp->nxtlist = list; 2261 for (i = 0; i < RCU_NEXT_SIZE; i++) 2262 if (&rdp->nxtlist == rdp->nxttail[i]) 2263 rdp->nxttail[i] = tail; 2264 else 2265 break; 2266 } 2267 smp_mb(); /* List handling before counting for rcu_barrier(). */ 2268 rdp->qlen_lazy -= count_lazy; 2269 ACCESS_ONCE(rdp->qlen) -= count; 2270 rdp->n_cbs_invoked += count; 2271 2272 /* Reinstate batch limit if we have worked down the excess. */ 2273 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) 2274 rdp->blimit = blimit; 2275 2276 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ 2277 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { 2278 rdp->qlen_last_fqs_check = 0; 2279 rdp->n_force_qs_snap = rsp->n_force_qs; 2280 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) 2281 rdp->qlen_last_fqs_check = rdp->qlen; 2282 WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0)); 2283 2284 local_irq_restore(flags); 2285 2286 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2287 if (cpu_has_callbacks_ready_to_invoke(rdp)) 2288 invoke_rcu_core(); 2289 } 2290 2291 /* 2292 * Check to see if this CPU is in a non-context-switch quiescent state 2293 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). 2294 * Also schedule RCU core processing. 2295 * 2296 * This function must be called from hardirq context. It is normally 2297 * invoked from the scheduling-clock interrupt. If rcu_pending returns 2298 * false, there is no point in invoking rcu_check_callbacks(). 2299 */ 2300 void rcu_check_callbacks(int cpu, int user) 2301 { 2302 trace_rcu_utilization(TPS("Start scheduler-tick")); 2303 increment_cpu_stall_ticks(); 2304 if (user || rcu_is_cpu_rrupt_from_idle()) { 2305 2306 /* 2307 * Get here if this CPU took its interrupt from user 2308 * mode or from the idle loop, and if this is not a 2309 * nested interrupt. In this case, the CPU is in 2310 * a quiescent state, so note it. 2311 * 2312 * No memory barrier is required here because both 2313 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local 2314 * variables that other CPUs neither access nor modify, 2315 * at least not while the corresponding CPU is online. 2316 */ 2317 2318 rcu_sched_qs(cpu); 2319 rcu_bh_qs(cpu); 2320 2321 } else if (!in_softirq()) { 2322 2323 /* 2324 * Get here if this CPU did not take its interrupt from 2325 * softirq, in other words, if it is not interrupting 2326 * a rcu_bh read-side critical section. This is an _bh 2327 * critical section, so note it. 2328 */ 2329 2330 rcu_bh_qs(cpu); 2331 } 2332 rcu_preempt_check_callbacks(cpu); 2333 if (rcu_pending(cpu)) 2334 invoke_rcu_core(); 2335 trace_rcu_utilization(TPS("End scheduler-tick")); 2336 } 2337 2338 /* 2339 * Scan the leaf rcu_node structures, processing dyntick state for any that 2340 * have not yet encountered a quiescent state, using the function specified. 2341 * Also initiate boosting for any threads blocked on the root rcu_node. 2342 * 2343 * The caller must have suppressed start of new grace periods. 2344 */ 2345 static void force_qs_rnp(struct rcu_state *rsp, 2346 int (*f)(struct rcu_data *rsp, bool *isidle, 2347 unsigned long *maxj), 2348 bool *isidle, unsigned long *maxj) 2349 { 2350 unsigned long bit; 2351 int cpu; 2352 unsigned long flags; 2353 unsigned long mask; 2354 struct rcu_node *rnp; 2355 2356 rcu_for_each_leaf_node(rsp, rnp) { 2357 cond_resched(); 2358 mask = 0; 2359 raw_spin_lock_irqsave(&rnp->lock, flags); 2360 smp_mb__after_unlock_lock(); 2361 if (!rcu_gp_in_progress(rsp)) { 2362 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2363 return; 2364 } 2365 if (rnp->qsmask == 0) { 2366 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ 2367 continue; 2368 } 2369 cpu = rnp->grplo; 2370 bit = 1; 2371 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { 2372 if ((rnp->qsmask & bit) != 0) { 2373 if ((rnp->qsmaskinit & bit) != 0) 2374 *isidle = 0; 2375 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) 2376 mask |= bit; 2377 } 2378 } 2379 if (mask != 0) { 2380 2381 /* rcu_report_qs_rnp() releases rnp->lock. */ 2382 rcu_report_qs_rnp(mask, rsp, rnp, flags); 2383 continue; 2384 } 2385 raw_spin_unlock_irqrestore(&rnp->lock, flags); 2386 } 2387 rnp = rcu_get_root(rsp); 2388 if (rnp->qsmask == 0) { 2389 raw_spin_lock_irqsave(&rnp->lock, flags); 2390 smp_mb__after_unlock_lock(); 2391 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ 2392 } 2393 } 2394 2395 /* 2396 * Force quiescent states on reluctant CPUs, and also detect which 2397 * CPUs are in dyntick-idle mode. 2398 */ 2399 static void force_quiescent_state(struct rcu_state *rsp) 2400 { 2401 unsigned long flags; 2402 bool ret; 2403 struct rcu_node *rnp; 2404 struct rcu_node *rnp_old = NULL; 2405 2406 /* Funnel through hierarchy to reduce memory contention. */ 2407 rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode; 2408 for (; rnp != NULL; rnp = rnp->parent) { 2409 ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || 2410 !raw_spin_trylock(&rnp->fqslock); 2411 if (rnp_old != NULL) 2412 raw_spin_unlock(&rnp_old->fqslock); 2413 if (ret) { 2414 ACCESS_ONCE(rsp->n_force_qs_lh)++; 2415 return; 2416 } 2417 rnp_old = rnp; 2418 } 2419 /* rnp_old == rcu_get_root(rsp), rnp == NULL. */ 2420 2421 /* Reached the root of the rcu_node tree, acquire lock. */ 2422 raw_spin_lock_irqsave(&rnp_old->lock, flags); 2423 smp_mb__after_unlock_lock(); 2424 raw_spin_unlock(&rnp_old->fqslock); 2425 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { 2426 ACCESS_ONCE(rsp->n_force_qs_lh)++; 2427 raw_spin_unlock_irqrestore(&rnp_old->lock, flags); 2428 return; /* Someone beat us to it. */ 2429 } 2430 ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS; 2431 raw_spin_unlock_irqrestore(&rnp_old->lock, flags); 2432 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ 2433 } 2434 2435 /* 2436 * This does the RCU core processing work for the specified rcu_state 2437 * and rcu_data structures. This may be called only from the CPU to 2438 * whom the rdp belongs. 2439 */ 2440 static void 2441 __rcu_process_callbacks(struct rcu_state *rsp) 2442 { 2443 unsigned long flags; 2444 bool needwake; 2445 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2446 2447 WARN_ON_ONCE(rdp->beenonline == 0); 2448 2449 /* Update RCU state based on any recent quiescent states. */ 2450 rcu_check_quiescent_state(rsp, rdp); 2451 2452 /* Does this CPU require a not-yet-started grace period? */ 2453 local_irq_save(flags); 2454 if (cpu_needs_another_gp(rsp, rdp)) { 2455 raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */ 2456 needwake = rcu_start_gp(rsp); 2457 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); 2458 if (needwake) 2459 rcu_gp_kthread_wake(rsp); 2460 } else { 2461 local_irq_restore(flags); 2462 } 2463 2464 /* If there are callbacks ready, invoke them. */ 2465 if (cpu_has_callbacks_ready_to_invoke(rdp)) 2466 invoke_rcu_callbacks(rsp, rdp); 2467 2468 /* Do any needed deferred wakeups of rcuo kthreads. */ 2469 do_nocb_deferred_wakeup(rdp); 2470 } 2471 2472 /* 2473 * Do RCU core processing for the current CPU. 2474 */ 2475 static void rcu_process_callbacks(struct softirq_action *unused) 2476 { 2477 struct rcu_state *rsp; 2478 2479 if (cpu_is_offline(smp_processor_id())) 2480 return; 2481 trace_rcu_utilization(TPS("Start RCU core")); 2482 for_each_rcu_flavor(rsp) 2483 __rcu_process_callbacks(rsp); 2484 trace_rcu_utilization(TPS("End RCU core")); 2485 } 2486 2487 /* 2488 * Schedule RCU callback invocation. If the specified type of RCU 2489 * does not support RCU priority boosting, just do a direct call, 2490 * otherwise wake up the per-CPU kernel kthread. Note that because we 2491 * are running on the current CPU with interrupts disabled, the 2492 * rcu_cpu_kthread_task cannot disappear out from under us. 2493 */ 2494 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) 2495 { 2496 if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active))) 2497 return; 2498 if (likely(!rsp->boost)) { 2499 rcu_do_batch(rsp, rdp); 2500 return; 2501 } 2502 invoke_rcu_callbacks_kthread(); 2503 } 2504 2505 static void invoke_rcu_core(void) 2506 { 2507 if (cpu_online(smp_processor_id())) 2508 raise_softirq(RCU_SOFTIRQ); 2509 } 2510 2511 /* 2512 * Handle any core-RCU processing required by a call_rcu() invocation. 2513 */ 2514 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, 2515 struct rcu_head *head, unsigned long flags) 2516 { 2517 bool needwake; 2518 2519 /* 2520 * If called from an extended quiescent state, invoke the RCU 2521 * core in order to force a re-evaluation of RCU's idleness. 2522 */ 2523 if (!rcu_is_watching() && cpu_online(smp_processor_id())) 2524 invoke_rcu_core(); 2525 2526 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 2527 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 2528 return; 2529 2530 /* 2531 * Force the grace period if too many callbacks or too long waiting. 2532 * Enforce hysteresis, and don't invoke force_quiescent_state() 2533 * if some other CPU has recently done so. Also, don't bother 2534 * invoking force_quiescent_state() if the newly enqueued callback 2535 * is the only one waiting for a grace period to complete. 2536 */ 2537 if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { 2538 2539 /* Are we ignoring a completed grace period? */ 2540 note_gp_changes(rsp, rdp); 2541 2542 /* Start a new grace period if one not already started. */ 2543 if (!rcu_gp_in_progress(rsp)) { 2544 struct rcu_node *rnp_root = rcu_get_root(rsp); 2545 2546 raw_spin_lock(&rnp_root->lock); 2547 smp_mb__after_unlock_lock(); 2548 needwake = rcu_start_gp(rsp); 2549 raw_spin_unlock(&rnp_root->lock); 2550 if (needwake) 2551 rcu_gp_kthread_wake(rsp); 2552 } else { 2553 /* Give the grace period a kick. */ 2554 rdp->blimit = LONG_MAX; 2555 if (rsp->n_force_qs == rdp->n_force_qs_snap && 2556 *rdp->nxttail[RCU_DONE_TAIL] != head) 2557 force_quiescent_state(rsp); 2558 rdp->n_force_qs_snap = rsp->n_force_qs; 2559 rdp->qlen_last_fqs_check = rdp->qlen; 2560 } 2561 } 2562 } 2563 2564 /* 2565 * RCU callback function to leak a callback. 2566 */ 2567 static void rcu_leak_callback(struct rcu_head *rhp) 2568 { 2569 } 2570 2571 /* 2572 * Helper function for call_rcu() and friends. The cpu argument will 2573 * normally be -1, indicating "currently running CPU". It may specify 2574 * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier() 2575 * is expected to specify a CPU. 2576 */ 2577 static void 2578 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), 2579 struct rcu_state *rsp, int cpu, bool lazy) 2580 { 2581 unsigned long flags; 2582 struct rcu_data *rdp; 2583 2584 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */ 2585 if (debug_rcu_head_queue(head)) { 2586 /* Probable double call_rcu(), so leak the callback. */ 2587 ACCESS_ONCE(head->func) = rcu_leak_callback; 2588 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n"); 2589 return; 2590 } 2591 head->func = func; 2592 head->next = NULL; 2593 2594 /* 2595 * Opportunistically note grace-period endings and beginnings. 2596 * Note that we might see a beginning right after we see an 2597 * end, but never vice versa, since this CPU has to pass through 2598 * a quiescent state betweentimes. 2599 */ 2600 local_irq_save(flags); 2601 rdp = this_cpu_ptr(rsp->rda); 2602 2603 /* Add the callback to our list. */ 2604 if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) { 2605 int offline; 2606 2607 if (cpu != -1) 2608 rdp = per_cpu_ptr(rsp->rda, cpu); 2609 offline = !__call_rcu_nocb(rdp, head, lazy, flags); 2610 WARN_ON_ONCE(offline); 2611 /* _call_rcu() is illegal on offline CPU; leak the callback. */ 2612 local_irq_restore(flags); 2613 return; 2614 } 2615 ACCESS_ONCE(rdp->qlen)++; 2616 if (lazy) 2617 rdp->qlen_lazy++; 2618 else 2619 rcu_idle_count_callbacks_posted(); 2620 smp_mb(); /* Count before adding callback for rcu_barrier(). */ 2621 *rdp->nxttail[RCU_NEXT_TAIL] = head; 2622 rdp->nxttail[RCU_NEXT_TAIL] = &head->next; 2623 2624 if (__is_kfree_rcu_offset((unsigned long)func)) 2625 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func, 2626 rdp->qlen_lazy, rdp->qlen); 2627 else 2628 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen); 2629 2630 /* Go handle any RCU core processing required. */ 2631 __call_rcu_core(rsp, rdp, head, flags); 2632 local_irq_restore(flags); 2633 } 2634 2635 /* 2636 * Queue an RCU-sched callback for invocation after a grace period. 2637 */ 2638 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 2639 { 2640 __call_rcu(head, func, &rcu_sched_state, -1, 0); 2641 } 2642 EXPORT_SYMBOL_GPL(call_rcu_sched); 2643 2644 /* 2645 * Queue an RCU callback for invocation after a quicker grace period. 2646 */ 2647 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 2648 { 2649 __call_rcu(head, func, &rcu_bh_state, -1, 0); 2650 } 2651 EXPORT_SYMBOL_GPL(call_rcu_bh); 2652 2653 /* 2654 * Queue an RCU callback for lazy invocation after a grace period. 2655 * This will likely be later named something like "call_rcu_lazy()", 2656 * but this change will require some way of tagging the lazy RCU 2657 * callbacks in the list of pending callbacks. Until then, this 2658 * function may only be called from __kfree_rcu(). 2659 */ 2660 void kfree_call_rcu(struct rcu_head *head, 2661 void (*func)(struct rcu_head *rcu)) 2662 { 2663 __call_rcu(head, func, rcu_state_p, -1, 1); 2664 } 2665 EXPORT_SYMBOL_GPL(kfree_call_rcu); 2666 2667 /* 2668 * Because a context switch is a grace period for RCU-sched and RCU-bh, 2669 * any blocking grace-period wait automatically implies a grace period 2670 * if there is only one CPU online at any point time during execution 2671 * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to 2672 * occasionally incorrectly indicate that there are multiple CPUs online 2673 * when there was in fact only one the whole time, as this just adds 2674 * some overhead: RCU still operates correctly. 2675 */ 2676 static inline int rcu_blocking_is_gp(void) 2677 { 2678 int ret; 2679 2680 might_sleep(); /* Check for RCU read-side critical section. */ 2681 preempt_disable(); 2682 ret = num_online_cpus() <= 1; 2683 preempt_enable(); 2684 return ret; 2685 } 2686 2687 /** 2688 * synchronize_sched - wait until an rcu-sched grace period has elapsed. 2689 * 2690 * Control will return to the caller some time after a full rcu-sched 2691 * grace period has elapsed, in other words after all currently executing 2692 * rcu-sched read-side critical sections have completed. These read-side 2693 * critical sections are delimited by rcu_read_lock_sched() and 2694 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), 2695 * local_irq_disable(), and so on may be used in place of 2696 * rcu_read_lock_sched(). 2697 * 2698 * This means that all preempt_disable code sequences, including NMI and 2699 * non-threaded hardware-interrupt handlers, in progress on entry will 2700 * have completed before this primitive returns. However, this does not 2701 * guarantee that softirq handlers will have completed, since in some 2702 * kernels, these handlers can run in process context, and can block. 2703 * 2704 * Note that this guarantee implies further memory-ordering guarantees. 2705 * On systems with more than one CPU, when synchronize_sched() returns, 2706 * each CPU is guaranteed to have executed a full memory barrier since the 2707 * end of its last RCU-sched read-side critical section whose beginning 2708 * preceded the call to synchronize_sched(). In addition, each CPU having 2709 * an RCU read-side critical section that extends beyond the return from 2710 * synchronize_sched() is guaranteed to have executed a full memory barrier 2711 * after the beginning of synchronize_sched() and before the beginning of 2712 * that RCU read-side critical section. Note that these guarantees include 2713 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 2714 * that are executing in the kernel. 2715 * 2716 * Furthermore, if CPU A invoked synchronize_sched(), which returned 2717 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 2718 * to have executed a full memory barrier during the execution of 2719 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but 2720 * again only if the system has more than one CPU). 2721 * 2722 * This primitive provides the guarantees made by the (now removed) 2723 * synchronize_kernel() API. In contrast, synchronize_rcu() only 2724 * guarantees that rcu_read_lock() sections will have completed. 2725 * In "classic RCU", these two guarantees happen to be one and 2726 * the same, but can differ in realtime RCU implementations. 2727 */ 2728 void synchronize_sched(void) 2729 { 2730 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) && 2731 !lock_is_held(&rcu_lock_map) && 2732 !lock_is_held(&rcu_sched_lock_map), 2733 "Illegal synchronize_sched() in RCU-sched read-side critical section"); 2734 if (rcu_blocking_is_gp()) 2735 return; 2736 if (rcu_expedited) 2737 synchronize_sched_expedited(); 2738 else 2739 wait_rcu_gp(call_rcu_sched); 2740 } 2741 EXPORT_SYMBOL_GPL(synchronize_sched); 2742 2743 /** 2744 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. 2745 * 2746 * Control will return to the caller some time after a full rcu_bh grace 2747 * period has elapsed, in other words after all currently executing rcu_bh 2748 * read-side critical sections have completed. RCU read-side critical 2749 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), 2750 * and may be nested. 2751 * 2752 * See the description of synchronize_sched() for more detailed information 2753 * on memory ordering guarantees. 2754 */ 2755 void synchronize_rcu_bh(void) 2756 { 2757 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) && 2758 !lock_is_held(&rcu_lock_map) && 2759 !lock_is_held(&rcu_sched_lock_map), 2760 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section"); 2761 if (rcu_blocking_is_gp()) 2762 return; 2763 if (rcu_expedited) 2764 synchronize_rcu_bh_expedited(); 2765 else 2766 wait_rcu_gp(call_rcu_bh); 2767 } 2768 EXPORT_SYMBOL_GPL(synchronize_rcu_bh); 2769 2770 /** 2771 * get_state_synchronize_rcu - Snapshot current RCU state 2772 * 2773 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 2774 * to determine whether or not a full grace period has elapsed in the 2775 * meantime. 2776 */ 2777 unsigned long get_state_synchronize_rcu(void) 2778 { 2779 /* 2780 * Any prior manipulation of RCU-protected data must happen 2781 * before the load from ->gpnum. 2782 */ 2783 smp_mb(); /* ^^^ */ 2784 2785 /* 2786 * Make sure this load happens before the purportedly 2787 * time-consuming work between get_state_synchronize_rcu() 2788 * and cond_synchronize_rcu(). 2789 */ 2790 return smp_load_acquire(&rcu_state_p->gpnum); 2791 } 2792 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 2793 2794 /** 2795 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 2796 * 2797 * @oldstate: return value from earlier call to get_state_synchronize_rcu() 2798 * 2799 * If a full RCU grace period has elapsed since the earlier call to 2800 * get_state_synchronize_rcu(), just return. Otherwise, invoke 2801 * synchronize_rcu() to wait for a full grace period. 2802 * 2803 * Yes, this function does not take counter wrap into account. But 2804 * counter wrap is harmless. If the counter wraps, we have waited for 2805 * more than 2 billion grace periods (and way more on a 64-bit system!), 2806 * so waiting for one additional grace period should be just fine. 2807 */ 2808 void cond_synchronize_rcu(unsigned long oldstate) 2809 { 2810 unsigned long newstate; 2811 2812 /* 2813 * Ensure that this load happens before any RCU-destructive 2814 * actions the caller might carry out after we return. 2815 */ 2816 newstate = smp_load_acquire(&rcu_state_p->completed); 2817 if (ULONG_CMP_GE(oldstate, newstate)) 2818 synchronize_rcu(); 2819 } 2820 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 2821 2822 static int synchronize_sched_expedited_cpu_stop(void *data) 2823 { 2824 /* 2825 * There must be a full memory barrier on each affected CPU 2826 * between the time that try_stop_cpus() is called and the 2827 * time that it returns. 2828 * 2829 * In the current initial implementation of cpu_stop, the 2830 * above condition is already met when the control reaches 2831 * this point and the following smp_mb() is not strictly 2832 * necessary. Do smp_mb() anyway for documentation and 2833 * robustness against future implementation changes. 2834 */ 2835 smp_mb(); /* See above comment block. */ 2836 return 0; 2837 } 2838 2839 /** 2840 * synchronize_sched_expedited - Brute-force RCU-sched grace period 2841 * 2842 * Wait for an RCU-sched grace period to elapse, but use a "big hammer" 2843 * approach to force the grace period to end quickly. This consumes 2844 * significant time on all CPUs and is unfriendly to real-time workloads, 2845 * so is thus not recommended for any sort of common-case code. In fact, 2846 * if you are using synchronize_sched_expedited() in a loop, please 2847 * restructure your code to batch your updates, and then use a single 2848 * synchronize_sched() instead. 2849 * 2850 * Note that it is illegal to call this function while holding any lock 2851 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal 2852 * to call this function from a CPU-hotplug notifier. Failing to observe 2853 * these restriction will result in deadlock. 2854 * 2855 * This implementation can be thought of as an application of ticket 2856 * locking to RCU, with sync_sched_expedited_started and 2857 * sync_sched_expedited_done taking on the roles of the halves 2858 * of the ticket-lock word. Each task atomically increments 2859 * sync_sched_expedited_started upon entry, snapshotting the old value, 2860 * then attempts to stop all the CPUs. If this succeeds, then each 2861 * CPU will have executed a context switch, resulting in an RCU-sched 2862 * grace period. We are then done, so we use atomic_cmpxchg() to 2863 * update sync_sched_expedited_done to match our snapshot -- but 2864 * only if someone else has not already advanced past our snapshot. 2865 * 2866 * On the other hand, if try_stop_cpus() fails, we check the value 2867 * of sync_sched_expedited_done. If it has advanced past our 2868 * initial snapshot, then someone else must have forced a grace period 2869 * some time after we took our snapshot. In this case, our work is 2870 * done for us, and we can simply return. Otherwise, we try again, 2871 * but keep our initial snapshot for purposes of checking for someone 2872 * doing our work for us. 2873 * 2874 * If we fail too many times in a row, we fall back to synchronize_sched(). 2875 */ 2876 void synchronize_sched_expedited(void) 2877 { 2878 long firstsnap, s, snap; 2879 int trycount = 0; 2880 struct rcu_state *rsp = &rcu_sched_state; 2881 2882 /* 2883 * If we are in danger of counter wrap, just do synchronize_sched(). 2884 * By allowing sync_sched_expedited_started to advance no more than 2885 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring 2886 * that more than 3.5 billion CPUs would be required to force a 2887 * counter wrap on a 32-bit system. Quite a few more CPUs would of 2888 * course be required on a 64-bit system. 2889 */ 2890 if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start), 2891 (ulong)atomic_long_read(&rsp->expedited_done) + 2892 ULONG_MAX / 8)) { 2893 synchronize_sched(); 2894 atomic_long_inc(&rsp->expedited_wrap); 2895 return; 2896 } 2897 2898 /* 2899 * Take a ticket. Note that atomic_inc_return() implies a 2900 * full memory barrier. 2901 */ 2902 snap = atomic_long_inc_return(&rsp->expedited_start); 2903 firstsnap = snap; 2904 get_online_cpus(); 2905 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id())); 2906 2907 /* 2908 * Each pass through the following loop attempts to force a 2909 * context switch on each CPU. 2910 */ 2911 while (try_stop_cpus(cpu_online_mask, 2912 synchronize_sched_expedited_cpu_stop, 2913 NULL) == -EAGAIN) { 2914 put_online_cpus(); 2915 atomic_long_inc(&rsp->expedited_tryfail); 2916 2917 /* Check to see if someone else did our work for us. */ 2918 s = atomic_long_read(&rsp->expedited_done); 2919 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { 2920 /* ensure test happens before caller kfree */ 2921 smp_mb__before_atomic(); /* ^^^ */ 2922 atomic_long_inc(&rsp->expedited_workdone1); 2923 return; 2924 } 2925 2926 /* No joy, try again later. Or just synchronize_sched(). */ 2927 if (trycount++ < 10) { 2928 udelay(trycount * num_online_cpus()); 2929 } else { 2930 wait_rcu_gp(call_rcu_sched); 2931 atomic_long_inc(&rsp->expedited_normal); 2932 return; 2933 } 2934 2935 /* Recheck to see if someone else did our work for us. */ 2936 s = atomic_long_read(&rsp->expedited_done); 2937 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { 2938 /* ensure test happens before caller kfree */ 2939 smp_mb__before_atomic(); /* ^^^ */ 2940 atomic_long_inc(&rsp->expedited_workdone2); 2941 return; 2942 } 2943 2944 /* 2945 * Refetching sync_sched_expedited_started allows later 2946 * callers to piggyback on our grace period. We retry 2947 * after they started, so our grace period works for them, 2948 * and they started after our first try, so their grace 2949 * period works for us. 2950 */ 2951 get_online_cpus(); 2952 snap = atomic_long_read(&rsp->expedited_start); 2953 smp_mb(); /* ensure read is before try_stop_cpus(). */ 2954 } 2955 atomic_long_inc(&rsp->expedited_stoppedcpus); 2956 2957 /* 2958 * Everyone up to our most recent fetch is covered by our grace 2959 * period. Update the counter, but only if our work is still 2960 * relevant -- which it won't be if someone who started later 2961 * than we did already did their update. 2962 */ 2963 do { 2964 atomic_long_inc(&rsp->expedited_done_tries); 2965 s = atomic_long_read(&rsp->expedited_done); 2966 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) { 2967 /* ensure test happens before caller kfree */ 2968 smp_mb__before_atomic(); /* ^^^ */ 2969 atomic_long_inc(&rsp->expedited_done_lost); 2970 break; 2971 } 2972 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s); 2973 atomic_long_inc(&rsp->expedited_done_exit); 2974 2975 put_online_cpus(); 2976 } 2977 EXPORT_SYMBOL_GPL(synchronize_sched_expedited); 2978 2979 /* 2980 * Check to see if there is any immediate RCU-related work to be done 2981 * by the current CPU, for the specified type of RCU, returning 1 if so. 2982 * The checks are in order of increasing expense: checks that can be 2983 * carried out against CPU-local state are performed first. However, 2984 * we must check for CPU stalls first, else we might not get a chance. 2985 */ 2986 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) 2987 { 2988 struct rcu_node *rnp = rdp->mynode; 2989 2990 rdp->n_rcu_pending++; 2991 2992 /* Check for CPU stalls, if enabled. */ 2993 check_cpu_stall(rsp, rdp); 2994 2995 /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */ 2996 if (rcu_nohz_full_cpu(rsp)) 2997 return 0; 2998 2999 /* Is the RCU core waiting for a quiescent state from this CPU? */ 3000 if (rcu_scheduler_fully_active && 3001 rdp->qs_pending && !rdp->passed_quiesce) { 3002 rdp->n_rp_qs_pending++; 3003 } else if (rdp->qs_pending && rdp->passed_quiesce) { 3004 rdp->n_rp_report_qs++; 3005 return 1; 3006 } 3007 3008 /* Does this CPU have callbacks ready to invoke? */ 3009 if (cpu_has_callbacks_ready_to_invoke(rdp)) { 3010 rdp->n_rp_cb_ready++; 3011 return 1; 3012 } 3013 3014 /* Has RCU gone idle with this CPU needing another grace period? */ 3015 if (cpu_needs_another_gp(rsp, rdp)) { 3016 rdp->n_rp_cpu_needs_gp++; 3017 return 1; 3018 } 3019 3020 /* Has another RCU grace period completed? */ 3021 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ 3022 rdp->n_rp_gp_completed++; 3023 return 1; 3024 } 3025 3026 /* Has a new RCU grace period started? */ 3027 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ 3028 rdp->n_rp_gp_started++; 3029 return 1; 3030 } 3031 3032 /* Does this CPU need a deferred NOCB wakeup? */ 3033 if (rcu_nocb_need_deferred_wakeup(rdp)) { 3034 rdp->n_rp_nocb_defer_wakeup++; 3035 return 1; 3036 } 3037 3038 /* nothing to do */ 3039 rdp->n_rp_need_nothing++; 3040 return 0; 3041 } 3042 3043 /* 3044 * Check to see if there is any immediate RCU-related work to be done 3045 * by the current CPU, returning 1 if so. This function is part of the 3046 * RCU implementation; it is -not- an exported member of the RCU API. 3047 */ 3048 static int rcu_pending(int cpu) 3049 { 3050 struct rcu_state *rsp; 3051 3052 for_each_rcu_flavor(rsp) 3053 if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu))) 3054 return 1; 3055 return 0; 3056 } 3057 3058 /* 3059 * Return true if the specified CPU has any callback. If all_lazy is 3060 * non-NULL, store an indication of whether all callbacks are lazy. 3061 * (If there are no callbacks, all of them are deemed to be lazy.) 3062 */ 3063 static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy) 3064 { 3065 bool al = true; 3066 bool hc = false; 3067 struct rcu_data *rdp; 3068 struct rcu_state *rsp; 3069 3070 for_each_rcu_flavor(rsp) { 3071 rdp = per_cpu_ptr(rsp->rda, cpu); 3072 if (!rdp->nxtlist) 3073 continue; 3074 hc = true; 3075 if (rdp->qlen != rdp->qlen_lazy || !all_lazy) { 3076 al = false; 3077 break; 3078 } 3079 } 3080 if (all_lazy) 3081 *all_lazy = al; 3082 return hc; 3083 } 3084 3085 /* 3086 * Helper function for _rcu_barrier() tracing. If tracing is disabled, 3087 * the compiler is expected to optimize this away. 3088 */ 3089 static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s, 3090 int cpu, unsigned long done) 3091 { 3092 trace_rcu_barrier(rsp->name, s, cpu, 3093 atomic_read(&rsp->barrier_cpu_count), done); 3094 } 3095 3096 /* 3097 * RCU callback function for _rcu_barrier(). If we are last, wake 3098 * up the task executing _rcu_barrier(). 3099 */ 3100 static void rcu_barrier_callback(struct rcu_head *rhp) 3101 { 3102 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head); 3103 struct rcu_state *rsp = rdp->rsp; 3104 3105 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) { 3106 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done); 3107 complete(&rsp->barrier_completion); 3108 } else { 3109 _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done); 3110 } 3111 } 3112 3113 /* 3114 * Called with preemption disabled, and from cross-cpu IRQ context. 3115 */ 3116 static void rcu_barrier_func(void *type) 3117 { 3118 struct rcu_state *rsp = type; 3119 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 3120 3121 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done); 3122 atomic_inc(&rsp->barrier_cpu_count); 3123 rsp->call(&rdp->barrier_head, rcu_barrier_callback); 3124 } 3125 3126 /* 3127 * Orchestrate the specified type of RCU barrier, waiting for all 3128 * RCU callbacks of the specified type to complete. 3129 */ 3130 static void _rcu_barrier(struct rcu_state *rsp) 3131 { 3132 int cpu; 3133 struct rcu_data *rdp; 3134 unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done); 3135 unsigned long snap_done; 3136 3137 _rcu_barrier_trace(rsp, "Begin", -1, snap); 3138 3139 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 3140 mutex_lock(&rsp->barrier_mutex); 3141 3142 /* 3143 * Ensure that all prior references, including to ->n_barrier_done, 3144 * are ordered before the _rcu_barrier() machinery. 3145 */ 3146 smp_mb(); /* See above block comment. */ 3147 3148 /* 3149 * Recheck ->n_barrier_done to see if others did our work for us. 3150 * This means checking ->n_barrier_done for an even-to-odd-to-even 3151 * transition. The "if" expression below therefore rounds the old 3152 * value up to the next even number and adds two before comparing. 3153 */ 3154 snap_done = rsp->n_barrier_done; 3155 _rcu_barrier_trace(rsp, "Check", -1, snap_done); 3156 3157 /* 3158 * If the value in snap is odd, we needed to wait for the current 3159 * rcu_barrier() to complete, then wait for the next one, in other 3160 * words, we need the value of snap_done to be three larger than 3161 * the value of snap. On the other hand, if the value in snap is 3162 * even, we only had to wait for the next rcu_barrier() to complete, 3163 * in other words, we need the value of snap_done to be only two 3164 * greater than the value of snap. The "(snap + 3) & ~0x1" computes 3165 * this for us (thank you, Linus!). 3166 */ 3167 if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) { 3168 _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done); 3169 smp_mb(); /* caller's subsequent code after above check. */ 3170 mutex_unlock(&rsp->barrier_mutex); 3171 return; 3172 } 3173 3174 /* 3175 * Increment ->n_barrier_done to avoid duplicate work. Use 3176 * ACCESS_ONCE() to prevent the compiler from speculating 3177 * the increment to precede the early-exit check. 3178 */ 3179 ACCESS_ONCE(rsp->n_barrier_done)++; 3180 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1); 3181 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done); 3182 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */ 3183 3184 /* 3185 * Initialize the count to one rather than to zero in order to 3186 * avoid a too-soon return to zero in case of a short grace period 3187 * (or preemption of this task). Exclude CPU-hotplug operations 3188 * to ensure that no offline CPU has callbacks queued. 3189 */ 3190 init_completion(&rsp->barrier_completion); 3191 atomic_set(&rsp->barrier_cpu_count, 1); 3192 get_online_cpus(); 3193 3194 /* 3195 * Force each CPU with callbacks to register a new callback. 3196 * When that callback is invoked, we will know that all of the 3197 * corresponding CPU's preceding callbacks have been invoked. 3198 */ 3199 for_each_possible_cpu(cpu) { 3200 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu)) 3201 continue; 3202 rdp = per_cpu_ptr(rsp->rda, cpu); 3203 if (rcu_is_nocb_cpu(cpu)) { 3204 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, 3205 rsp->n_barrier_done); 3206 atomic_inc(&rsp->barrier_cpu_count); 3207 __call_rcu(&rdp->barrier_head, rcu_barrier_callback, 3208 rsp, cpu, 0); 3209 } else if (ACCESS_ONCE(rdp->qlen)) { 3210 _rcu_barrier_trace(rsp, "OnlineQ", cpu, 3211 rsp->n_barrier_done); 3212 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); 3213 } else { 3214 _rcu_barrier_trace(rsp, "OnlineNQ", cpu, 3215 rsp->n_barrier_done); 3216 } 3217 } 3218 put_online_cpus(); 3219 3220 /* 3221 * Now that we have an rcu_barrier_callback() callback on each 3222 * CPU, and thus each counted, remove the initial count. 3223 */ 3224 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) 3225 complete(&rsp->barrier_completion); 3226 3227 /* Increment ->n_barrier_done to prevent duplicate work. */ 3228 smp_mb(); /* Keep increment after above mechanism. */ 3229 ACCESS_ONCE(rsp->n_barrier_done)++; 3230 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0); 3231 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done); 3232 smp_mb(); /* Keep increment before caller's subsequent code. */ 3233 3234 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 3235 wait_for_completion(&rsp->barrier_completion); 3236 3237 /* Other rcu_barrier() invocations can now safely proceed. */ 3238 mutex_unlock(&rsp->barrier_mutex); 3239 } 3240 3241 /** 3242 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. 3243 */ 3244 void rcu_barrier_bh(void) 3245 { 3246 _rcu_barrier(&rcu_bh_state); 3247 } 3248 EXPORT_SYMBOL_GPL(rcu_barrier_bh); 3249 3250 /** 3251 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. 3252 */ 3253 void rcu_barrier_sched(void) 3254 { 3255 _rcu_barrier(&rcu_sched_state); 3256 } 3257 EXPORT_SYMBOL_GPL(rcu_barrier_sched); 3258 3259 /* 3260 * Do boot-time initialization of a CPU's per-CPU RCU data. 3261 */ 3262 static void __init 3263 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) 3264 { 3265 unsigned long flags; 3266 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 3267 struct rcu_node *rnp = rcu_get_root(rsp); 3268 3269 /* Set up local state, ensuring consistent view of global state. */ 3270 raw_spin_lock_irqsave(&rnp->lock, flags); 3271 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); 3272 init_callback_list(rdp); 3273 rdp->qlen_lazy = 0; 3274 ACCESS_ONCE(rdp->qlen) = 0; 3275 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 3276 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); 3277 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); 3278 rdp->cpu = cpu; 3279 rdp->rsp = rsp; 3280 rcu_boot_init_nocb_percpu_data(rdp); 3281 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3282 } 3283 3284 /* 3285 * Initialize a CPU's per-CPU RCU data. Note that only one online or 3286 * offline event can be happening at a given time. Note also that we 3287 * can accept some slop in the rsp->completed access due to the fact 3288 * that this CPU cannot possibly have any RCU callbacks in flight yet. 3289 */ 3290 static void 3291 rcu_init_percpu_data(int cpu, struct rcu_state *rsp) 3292 { 3293 unsigned long flags; 3294 unsigned long mask; 3295 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 3296 struct rcu_node *rnp = rcu_get_root(rsp); 3297 3298 /* Exclude new grace periods. */ 3299 mutex_lock(&rsp->onoff_mutex); 3300 3301 /* Set up local state, ensuring consistent view of global state. */ 3302 raw_spin_lock_irqsave(&rnp->lock, flags); 3303 rdp->beenonline = 1; /* We have now been online. */ 3304 rdp->qlen_last_fqs_check = 0; 3305 rdp->n_force_qs_snap = rsp->n_force_qs; 3306 rdp->blimit = blimit; 3307 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */ 3308 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; 3309 rcu_sysidle_init_percpu_data(rdp->dynticks); 3310 atomic_set(&rdp->dynticks->dynticks, 3311 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); 3312 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 3313 3314 /* Add CPU to rcu_node bitmasks. */ 3315 rnp = rdp->mynode; 3316 mask = rdp->grpmask; 3317 do { 3318 /* Exclude any attempts to start a new GP on small systems. */ 3319 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ 3320 rnp->qsmaskinit |= mask; 3321 mask = rnp->grpmask; 3322 if (rnp == rdp->mynode) { 3323 /* 3324 * If there is a grace period in progress, we will 3325 * set up to wait for it next time we run the 3326 * RCU core code. 3327 */ 3328 rdp->gpnum = rnp->completed; 3329 rdp->completed = rnp->completed; 3330 rdp->passed_quiesce = 0; 3331 rdp->qs_pending = 0; 3332 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); 3333 } 3334 raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ 3335 rnp = rnp->parent; 3336 } while (rnp != NULL && !(rnp->qsmaskinit & mask)); 3337 local_irq_restore(flags); 3338 3339 mutex_unlock(&rsp->onoff_mutex); 3340 } 3341 3342 static void rcu_prepare_cpu(int cpu) 3343 { 3344 struct rcu_state *rsp; 3345 3346 for_each_rcu_flavor(rsp) 3347 rcu_init_percpu_data(cpu, rsp); 3348 } 3349 3350 /* 3351 * Handle CPU online/offline notification events. 3352 */ 3353 static int rcu_cpu_notify(struct notifier_block *self, 3354 unsigned long action, void *hcpu) 3355 { 3356 long cpu = (long)hcpu; 3357 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); 3358 struct rcu_node *rnp = rdp->mynode; 3359 struct rcu_state *rsp; 3360 3361 trace_rcu_utilization(TPS("Start CPU hotplug")); 3362 switch (action) { 3363 case CPU_UP_PREPARE: 3364 case CPU_UP_PREPARE_FROZEN: 3365 rcu_prepare_cpu(cpu); 3366 rcu_prepare_kthreads(cpu); 3367 break; 3368 case CPU_ONLINE: 3369 case CPU_DOWN_FAILED: 3370 rcu_boost_kthread_setaffinity(rnp, -1); 3371 break; 3372 case CPU_DOWN_PREPARE: 3373 rcu_boost_kthread_setaffinity(rnp, cpu); 3374 break; 3375 case CPU_DYING: 3376 case CPU_DYING_FROZEN: 3377 for_each_rcu_flavor(rsp) 3378 rcu_cleanup_dying_cpu(rsp); 3379 break; 3380 case CPU_DEAD: 3381 case CPU_DEAD_FROZEN: 3382 case CPU_UP_CANCELED: 3383 case CPU_UP_CANCELED_FROZEN: 3384 for_each_rcu_flavor(rsp) 3385 rcu_cleanup_dead_cpu(cpu, rsp); 3386 break; 3387 default: 3388 break; 3389 } 3390 trace_rcu_utilization(TPS("End CPU hotplug")); 3391 return NOTIFY_OK; 3392 } 3393 3394 static int rcu_pm_notify(struct notifier_block *self, 3395 unsigned long action, void *hcpu) 3396 { 3397 switch (action) { 3398 case PM_HIBERNATION_PREPARE: 3399 case PM_SUSPEND_PREPARE: 3400 if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ 3401 rcu_expedited = 1; 3402 break; 3403 case PM_POST_HIBERNATION: 3404 case PM_POST_SUSPEND: 3405 rcu_expedited = 0; 3406 break; 3407 default: 3408 break; 3409 } 3410 return NOTIFY_OK; 3411 } 3412 3413 /* 3414 * Spawn the kthread that handles this RCU flavor's grace periods. 3415 */ 3416 static int __init rcu_spawn_gp_kthread(void) 3417 { 3418 unsigned long flags; 3419 struct rcu_node *rnp; 3420 struct rcu_state *rsp; 3421 struct task_struct *t; 3422 3423 for_each_rcu_flavor(rsp) { 3424 t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name); 3425 BUG_ON(IS_ERR(t)); 3426 rnp = rcu_get_root(rsp); 3427 raw_spin_lock_irqsave(&rnp->lock, flags); 3428 rsp->gp_kthread = t; 3429 raw_spin_unlock_irqrestore(&rnp->lock, flags); 3430 rcu_spawn_nocb_kthreads(rsp); 3431 } 3432 return 0; 3433 } 3434 early_initcall(rcu_spawn_gp_kthread); 3435 3436 /* 3437 * This function is invoked towards the end of the scheduler's initialization 3438 * process. Before this is called, the idle task might contain 3439 * RCU read-side critical sections (during which time, this idle 3440 * task is booting the system). After this function is called, the 3441 * idle tasks are prohibited from containing RCU read-side critical 3442 * sections. This function also enables RCU lockdep checking. 3443 */ 3444 void rcu_scheduler_starting(void) 3445 { 3446 WARN_ON(num_online_cpus() != 1); 3447 WARN_ON(nr_context_switches() > 0); 3448 rcu_scheduler_active = 1; 3449 } 3450 3451 /* 3452 * Compute the per-level fanout, either using the exact fanout specified 3453 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. 3454 */ 3455 #ifdef CONFIG_RCU_FANOUT_EXACT 3456 static void __init rcu_init_levelspread(struct rcu_state *rsp) 3457 { 3458 int i; 3459 3460 rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf; 3461 for (i = rcu_num_lvls - 2; i >= 0; i--) 3462 rsp->levelspread[i] = CONFIG_RCU_FANOUT; 3463 } 3464 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ 3465 static void __init rcu_init_levelspread(struct rcu_state *rsp) 3466 { 3467 int ccur; 3468 int cprv; 3469 int i; 3470 3471 cprv = nr_cpu_ids; 3472 for (i = rcu_num_lvls - 1; i >= 0; i--) { 3473 ccur = rsp->levelcnt[i]; 3474 rsp->levelspread[i] = (cprv + ccur - 1) / ccur; 3475 cprv = ccur; 3476 } 3477 } 3478 #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */ 3479 3480 /* 3481 * Helper function for rcu_init() that initializes one rcu_state structure. 3482 */ 3483 static void __init rcu_init_one(struct rcu_state *rsp, 3484 struct rcu_data __percpu *rda) 3485 { 3486 static char *buf[] = { "rcu_node_0", 3487 "rcu_node_1", 3488 "rcu_node_2", 3489 "rcu_node_3" }; /* Match MAX_RCU_LVLS */ 3490 static char *fqs[] = { "rcu_node_fqs_0", 3491 "rcu_node_fqs_1", 3492 "rcu_node_fqs_2", 3493 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ 3494 int cpustride = 1; 3495 int i; 3496 int j; 3497 struct rcu_node *rnp; 3498 3499 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 3500 3501 /* Silence gcc 4.8 warning about array index out of range. */ 3502 if (rcu_num_lvls > RCU_NUM_LVLS) 3503 panic("rcu_init_one: rcu_num_lvls overflow"); 3504 3505 /* Initialize the level-tracking arrays. */ 3506 3507 for (i = 0; i < rcu_num_lvls; i++) 3508 rsp->levelcnt[i] = num_rcu_lvl[i]; 3509 for (i = 1; i < rcu_num_lvls; i++) 3510 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; 3511 rcu_init_levelspread(rsp); 3512 3513 /* Initialize the elements themselves, starting from the leaves. */ 3514 3515 for (i = rcu_num_lvls - 1; i >= 0; i--) { 3516 cpustride *= rsp->levelspread[i]; 3517 rnp = rsp->level[i]; 3518 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 3519 raw_spin_lock_init(&rnp->lock); 3520 lockdep_set_class_and_name(&rnp->lock, 3521 &rcu_node_class[i], buf[i]); 3522 raw_spin_lock_init(&rnp->fqslock); 3523 lockdep_set_class_and_name(&rnp->fqslock, 3524 &rcu_fqs_class[i], fqs[i]); 3525 rnp->gpnum = rsp->gpnum; 3526 rnp->completed = rsp->completed; 3527 rnp->qsmask = 0; 3528 rnp->qsmaskinit = 0; 3529 rnp->grplo = j * cpustride; 3530 rnp->grphi = (j + 1) * cpustride - 1; 3531 if (rnp->grphi >= nr_cpu_ids) 3532 rnp->grphi = nr_cpu_ids - 1; 3533 if (i == 0) { 3534 rnp->grpnum = 0; 3535 rnp->grpmask = 0; 3536 rnp->parent = NULL; 3537 } else { 3538 rnp->grpnum = j % rsp->levelspread[i - 1]; 3539 rnp->grpmask = 1UL << rnp->grpnum; 3540 rnp->parent = rsp->level[i - 1] + 3541 j / rsp->levelspread[i - 1]; 3542 } 3543 rnp->level = i; 3544 INIT_LIST_HEAD(&rnp->blkd_tasks); 3545 rcu_init_one_nocb(rnp); 3546 } 3547 } 3548 3549 rsp->rda = rda; 3550 init_waitqueue_head(&rsp->gp_wq); 3551 rnp = rsp->level[rcu_num_lvls - 1]; 3552 for_each_possible_cpu(i) { 3553 while (i > rnp->grphi) 3554 rnp++; 3555 per_cpu_ptr(rsp->rda, i)->mynode = rnp; 3556 rcu_boot_init_percpu_data(i, rsp); 3557 } 3558 list_add(&rsp->flavors, &rcu_struct_flavors); 3559 } 3560 3561 /* 3562 * Compute the rcu_node tree geometry from kernel parameters. This cannot 3563 * replace the definitions in tree.h because those are needed to size 3564 * the ->node array in the rcu_state structure. 3565 */ 3566 static void __init rcu_init_geometry(void) 3567 { 3568 ulong d; 3569 int i; 3570 int j; 3571 int n = nr_cpu_ids; 3572 int rcu_capacity[MAX_RCU_LVLS + 1]; 3573 3574 /* 3575 * Initialize any unspecified boot parameters. 3576 * The default values of jiffies_till_first_fqs and 3577 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS 3578 * value, which is a function of HZ, then adding one for each 3579 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system. 3580 */ 3581 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; 3582 if (jiffies_till_first_fqs == ULONG_MAX) 3583 jiffies_till_first_fqs = d; 3584 if (jiffies_till_next_fqs == ULONG_MAX) 3585 jiffies_till_next_fqs = d; 3586 3587 /* If the compile-time values are accurate, just leave. */ 3588 if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF && 3589 nr_cpu_ids == NR_CPUS) 3590 return; 3591 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n", 3592 rcu_fanout_leaf, nr_cpu_ids); 3593 3594 /* 3595 * Compute number of nodes that can be handled an rcu_node tree 3596 * with the given number of levels. Setting rcu_capacity[0] makes 3597 * some of the arithmetic easier. 3598 */ 3599 rcu_capacity[0] = 1; 3600 rcu_capacity[1] = rcu_fanout_leaf; 3601 for (i = 2; i <= MAX_RCU_LVLS; i++) 3602 rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT; 3603 3604 /* 3605 * The boot-time rcu_fanout_leaf parameter is only permitted 3606 * to increase the leaf-level fanout, not decrease it. Of course, 3607 * the leaf-level fanout cannot exceed the number of bits in 3608 * the rcu_node masks. Finally, the tree must be able to accommodate 3609 * the configured number of CPUs. Complain and fall back to the 3610 * compile-time values if these limits are exceeded. 3611 */ 3612 if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF || 3613 rcu_fanout_leaf > sizeof(unsigned long) * 8 || 3614 n > rcu_capacity[MAX_RCU_LVLS]) { 3615 WARN_ON(1); 3616 return; 3617 } 3618 3619 /* Calculate the number of rcu_nodes at each level of the tree. */ 3620 for (i = 1; i <= MAX_RCU_LVLS; i++) 3621 if (n <= rcu_capacity[i]) { 3622 for (j = 0; j <= i; j++) 3623 num_rcu_lvl[j] = 3624 DIV_ROUND_UP(n, rcu_capacity[i - j]); 3625 rcu_num_lvls = i; 3626 for (j = i + 1; j <= MAX_RCU_LVLS; j++) 3627 num_rcu_lvl[j] = 0; 3628 break; 3629 } 3630 3631 /* Calculate the total number of rcu_node structures. */ 3632 rcu_num_nodes = 0; 3633 for (i = 0; i <= MAX_RCU_LVLS; i++) 3634 rcu_num_nodes += num_rcu_lvl[i]; 3635 rcu_num_nodes -= n; 3636 } 3637 3638 void __init rcu_init(void) 3639 { 3640 int cpu; 3641 3642 rcu_bootup_announce(); 3643 rcu_init_geometry(); 3644 rcu_init_one(&rcu_bh_state, &rcu_bh_data); 3645 rcu_init_one(&rcu_sched_state, &rcu_sched_data); 3646 __rcu_init_preempt(); 3647 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 3648 3649 /* 3650 * We don't need protection against CPU-hotplug here because 3651 * this is called early in boot, before either interrupts 3652 * or the scheduler are operational. 3653 */ 3654 cpu_notifier(rcu_cpu_notify, 0); 3655 pm_notifier(rcu_pm_notify, 0); 3656 for_each_online_cpu(cpu) 3657 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); 3658 } 3659 3660 #include "tree_plugin.h" 3661