/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().
 * The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * report an extended quiescent state to other CPUs that have started a
 * grace period.  Otherwise we would delay any grace period as long as we
 * run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_exp_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
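
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * fast grace periods during a bounded phase can bracket that phase with
 * rcu_expedite_gp()/rcu_unexpedite_gp().  The function name below is
 * hypothetical; only the rcu_expedite_gp(), synchronize_rcu(), and
 * rcu_unexpedite_gp() calls are the real APIs defined or used in this file.
 */
#if 0	/* example only */
static void example_fast_reconfig(void)
{
	rcu_expedite_gp();	/* subsequent synchronize_rcu() calls behave as expedited */
	/* ... perform updates, each waiting for a grace period ... */
	synchronize_rcu();
	rcu_unexpedite_gp();	/* restore normal (non-expedited) behavior */
}
#endif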
/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
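
/*
 * Illustrative sketch (not part of the original file): the lockdep maps
 * above let accessors assert that they are called from the right kind of
 * read-side critical section, for example via RCU_LOCKDEP_WARN() or
 * rcu_dereference_check().  The structure, variable, and function below
 * are hypothetical.
 */
#if 0	/* example only */
struct foo {
	int val;
};
static struct foo __rcu *foo_ptr;

static int example_get_val(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "example_get_val() needs rcu_read_lock()");
	return rcu_dereference(foo_ptr)->val;
}
#endif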
/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
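
/*
 * Illustrative sketch (not part of the original file): wakeme_after_rcu()
 * pairs an rcu_head with a completion, which is how __wait_rcu_gp() below
 * (and thus synchronize_rcu() and friends via wait_rcu_gp()) turns the
 * callback-based API into a synchronous wait.  Open-coded, the pattern
 * looks roughly like this; the function name is hypothetical.
 */
#if 0	/* example only */
static void example_open_coded_synchronize(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	call_rcu(&rcu.head, wakeme_after_rcu);	/* complete()s after a grace period */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
#endif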
void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be destroyed
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
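
/*
 * The clamping in rcu_jiffies_till_stall_check() below keeps
 * rcu_cpu_stall_timeout within the [3, 300] second range permitted by
 * CONFIG_RCU_CPU_STALL_TIMEOUT, so, for example, writing 500 via the
 * module parameter behaves as 300 and yields a check interval of
 * 300 * HZ + RCU_STALL_DELAY_DELTA jiffies.
 */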
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
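
/*
 * Illustrative sketch (not part of the original file): call_rcu_tasks()
 * defers freeing an object until every task has passed through a voluntary
 * context switch, user-space execution, or idle, which is what makes it
 * suitable for tearing down trampolines and similar code that tasks might
 * be executing without any explicit read-side marker.  The structure and
 * functions below are hypothetical, and kfree() would need <linux/slab.h>.
 */
#if 0	/* example only */
struct example_trampoline {
	struct rcu_head rh;
	/* ... executable payload ... */
};

static void example_free_trampoline(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_trampoline, rh));
}

static void example_retire_trampoline(struct example_trampoline *tp)
{
	/* Unpublish tp first, then wait for an RCU-tasks grace period. */
	call_rcu_tasks(&tp->rh, example_free_trampoline);
}
#endif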
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}
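
/*
 * In outline, each grace-period pass of the kthread below proceeds as
 * follows: dequeue the pending callbacks, invoke synchronize_sched() to
 * order against pre-existing ->on_rq and ->nvcsw transitions, snapshot all
 * runnable non-idle tasks as holdouts, wait for exiting tasks via
 * synchronize_srcu(), poll the holdout list until it empties, invoke a
 * final synchronize_sched() for ordering, and only then run the callbacks.
 */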
/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */
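
/*
 * Given the "rcupdate." MODULE_PARAM_PREFIX defined above, the early boot
 * self tests can be enabled from the kernel command line, for example with
 * "rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1"; the results are
 * then checked by rcu_verify_early_boot_tests() at late_initcall() time.
 */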