/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
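
/*
 * Illustrative note (not part of the original file): given the
 * MODULE_PARAM_PREFIX above, these parameters are set on the kernel
 * command line as, for example:
 *
 *	rcupdate.rcu_expedited=1	(expedite normal grace periods)
 *	rcupdate.rcu_normal=1		(make expedited requests act normal)
 *	rcupdate.rcu_normal_after_boot=1 (same as rcu_normal, but only once
 *					  booting is complete)
 *
 * If both rcu_expedited and rcu_normal are supplied, rcu_normal wins once
 * the scheduler is fully up, as noted in the comment above
 * rcu_gp_is_normal() below.
 */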

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * report an extended quiescent state to other CPUs that started a grace
 * period.  Otherwise we would delay any grace period as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_exp_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
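
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * fast grace periods during some phase of operation can bracket that phase
 * with the pair above.  Calls nest, so each rcu_expedite_gp() must
 * eventually be matched by an rcu_unexpedite_gp(), for example:
 *
 *	rcu_expedite_gp();
 *	do_init_work_with_many_synchronize_rcu_calls();	// hypothetical helper
 *	rcu_unexpedite_gp();
 *
 * As the comment above rcu_gp_is_expedited() warns, spinning on
 * rcu_unexpedite_gp() until rcu_gp_is_expedited() returns false is not a
 * sensible way to wait for the change to take effect.
 */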

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
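
/*
 * Illustrative sketch (not part of the original file): the primitives and
 * lockdep maps above back the usual reader-side pattern.  Here "gp" and
 * "my_lock" are hypothetical names:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference_check(gp, lockdep_is_held(&my_lock));
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * Under CONFIG_DEBUG_LOCK_ALLOC, rcu_dereference_check() also consults
 * rcu_read_lock_held() (and therefore rcu_lock_map above), so a dereference
 * performed with neither rcu_read_lock() nor my_lock held is reported by
 * lockdep.
 */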

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
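
/*
 * Illustrative sketch (not part of the original file): __wait_rcu_gp() is
 * the engine behind wait_rcu_gp() and thus behind synchronize_rcu() and
 * friends, with wakeme_after_rcu() completing the on-stack completion.  A
 * typical updater therefore relies on it indirectly; with hypothetical
 * "gp", "gp_lock", "newp", and struct foo:
 *
 *	struct foo *old;
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_rcu();	// typically waits via wakeme_after_rcu()
 *	kfree(old);
 */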

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
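
/*
 * Illustrative note (not part of the original file): rcu_cpu_stall_timeout
 * is in seconds, so booting with rcupdate.rcu_cpu_stall_timeout=60 makes
 * rcu_jiffies_till_stall_check() return 60 * HZ jiffies (plus the 5 * HZ
 * RCU_STALL_DELAY_DELTA slack when CONFIG_PROVE_RCU is set).  Out-of-range
 * values such as 1 or 1000 are clamped to 3 and 300 seconds, respectively,
 * matching the Kconfig limits for CONFIG_RCU_CPU_STALL_TIMEOUT.
 */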

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
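
/*
 * Illustrative sketch (not part of the original file): the typical
 * call_rcu_tasks() user frees something that running tasks might still be
 * executing in, such as a dynamically allocated trampoline, only after
 * every task has passed through a voluntary context switch.  With a
 * hypothetical wrapper type:
 *
 *	struct tramp {
 *		struct rcu_head rh;
 *		// ... code pages, usage counts, etc. ...
 *	};
 *
 *	static void tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct tramp, rh));
 *	}
 *
 *	// After unlinking the trampoline so no new tasks can enter it:
 *	call_rcu_tasks(&t->rh, tramp_free_cb);
 *
 * As noted above, the first call must come from process context once the
 * scheduler is fully operational.
 */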

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started.  */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}
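
/*
 * Illustrative sketch (not part of the original file): because RCU-tasks
 * quiescent states are voluntary context switches (plus the other events
 * listed above synchronize_rcu_tasks()), a long-running in-kernel loop
 * should report one explicitly so that its task is not flagged by
 * check_holdout_task(), for example:
 *
 *	while (more_work_to_do()) {	// hypothetical condition
 *		do_one_chunk_of_work();	// hypothetical helper
 *		cond_resched_rcu_qs();	// voluntary quiescent state
 *	}
 */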

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have a full memory barrier prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */
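
/*
 * Illustrative note (not part of the original file): the early boot self
 * tests above are disabled by default and may be enabled on the kernel
 * command line, for example:
 *
 *	rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1
 *	rcupdate.rcu_self_test_sched=1
 *
 * Each enabled flavor posts one callback from rcu_early_boot_tests(); if
 * any of those callbacks has not run by the time the
 * rcu_verify_early_boot_tests() late_initcall() executes, that function
 * issues a WARN and returns -1.
 */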