/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
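/*
 * Usage sketch (illustrative): because MODULE_PARAM_PREFIX is "rcupdate.",
 * the parameters above are set from the kernel boot command line, for
 * example:
 *
 *	rcupdate.rcu_expedited=1		# expedite grace periods
 *	rcupdate.rcu_normal=1			# force normal grace periods
 *	rcupdate.rcu_normal_after_boot=1	# go normal once boot completes
 *
 * The values shown are examples, not defaults; if both rcu_expedited and
 * rcu_normal are specified, rcu_normal wins, as noted below.
 */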
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view
 * (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_sched_held() returns false even if
 * the CPU did an rcu_read_lock_sched().  The reason for this is that RCU
 * ignores CPUs that are in such a section, considering them to be in an
 * extended quiescent state, so such a CPU is effectively never in an RCU
 * read-side critical section regardless of what RCU primitives it invokes.
 * This state of affairs is required --- we need to keep an RCU-free window
 * in idle where the CPU may possibly enter into low-power mode.  This way
 * we can report an extended quiescent state to other CPUs that have
 * started a grace period.  Otherwise we would delay any grace period for
 * as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the period of boot from when the
 * first task is spawned until the rcu_set_runtime_mode() core_initcall()
 * is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
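/*
 * Usage sketch (illustrative): code that temporarily needs fast grace
 * periods brackets the region with the pair above, relying on the
 * nesting counter.  Assuming a hypothetical do_latency_critical_update()
 * helper:
 *
 *	rcu_expedite_gp();
 *	do_latency_critical_update();
 *	synchronize_rcu();	// now behaves like synchronize_rcu_expedited()
 *	rcu_unexpedite_gp();
 *
 * As the comment on rcu_gp_is_expedited() warns, looping on
 * rcu_unexpedite_gp() until rcu_gp_is_expedited() returns false defeats
 * the nesting scheme and is a really bad idea.
 */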
/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
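/*
 * Usage sketch (illustrative): the usual consumer of rcu_read_lock_held()
 * is a lockdep-checked dereference in code that may run either under
 * rcu_read_lock() or with an update-side lock held.  Assuming a
 * hypothetical RCU-protected pointer gbl_foo guarded on the update side
 * by a hypothetical foo_mutex:
 *
 *	struct foo *p;
 *
 *	p = rcu_dereference_check(gbl_foo,
 *				  lockdep_is_held(&foo_mutex));
 *
 * rcu_dereference_check() folds rcu_read_lock_held() into its lockdep
 * condition, so holding either the RCU read lock or foo_mutex satisfies it.
 */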
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU-bh read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
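/*
 * Sketch (illustrative) of how the pieces above combine: an open-coded
 * equivalent of synchronize_rcu() built on wakeme_after_rcu(), which is
 * essentially what wait_rcu_gp()/__wait_rcu_gp() do for a single flavor.
 * This assumes process context where sleeping is legal:
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_rcu_head_on_stack(&rcu.head);
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);	// wake us after a grace period
 *	wait_for_completion(&rcu.completion);
 *	destroy_rcu_head_on_stack(&rcu.head);
 *
 * The on-stack init/destroy calls keep CONFIG_DEBUG_OBJECTS_RCU_HEAD
 * informed; they have no effect in other builds, as described below.
 */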
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that was previously initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly;	/* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
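/*
 * Usage sketch (illustrative): with MODULE_PARAM_PREFIX set to "rcupdate."
 * above, these knobs are reachable from the boot command line and, thanks
 * to the 0644 mode, at run time:
 *
 *	rcupdate.rcu_cpu_stall_timeout=60	# boot: stall timeout in seconds
 *	rcupdate.rcu_cpu_stall_suppress=1	# boot: suppress stall warnings
 *
 *	echo 1 > /sys/module/rcupdate/parameters/rcu_cpu_stall_suppress
 *
 * The values shown are examples only; rcu_jiffies_till_stall_check() below
 * clamps the timeout to the 3..300 second range.
 */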
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
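/*
 * Usage sketch (illustrative): a typical caller is tracing code that must
 * not free a trampoline or handler until every task has passed through a
 * voluntary context switch, userspace, or idle.  Assuming a hypothetical
 * struct my_trampoline with an embedded rcu_head:
 *
 *	struct my_trampoline {
 *		struct rcu_head rh;
 *		// ... executable trampoline payload ...
 *	};
 *
 *	static void my_trampoline_free(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_trampoline, rh));
 *	}
 *
 *	// After unhooking the trampoline so no new tasks can enter it:
 *	call_rcu_tasks(&tramp->rh, my_trampoline_free);
 *
 * Callers that can sleep may instead simply call synchronize_rcu_tasks(),
 * documented below, and then free the memory directly.
 */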
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out; complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb();  /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb();  /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */