/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering them to be in an extended
 * quiescent state, so such a CPU is effectively never in an RCU read-side
 * critical section regardless of what RCU primitives it invokes.  This
 * state of affairs is required --- we need to keep an RCU-free window in
 * idle where the CPU may possibly enter into low power mode.  This way,
 * CPUs that have started a grace period can notice the extended quiescent
 * state; otherwise we would delay any grace period for as long as we run
 * in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
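
/*
 * Example (illustrative sketch only; do_something_sched() and its
 * RCU-sched-protected data are hypothetical): a debug check built on
 * rcu_read_lock_sched_held() typically looks like this:
 *
 *	static void do_something_sched(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *				 "do_something_sched() needs rcu_read_lock_sched()");
 *		... access RCU-sched-protected data here ...
 *	}
 *
 * With CONFIG_DEBUG_LOCK_ALLOC=y this splats when the caller forgot
 * rcu_read_lock_sched() (or preempt_disable()); otherwise it is a no-op.
 */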

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during boot, from when the first task is
 * spawned until the rcu_set_runtime_mode() core_initcall() is invoked,
 * during which time everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
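
/*
 * Example (illustrative only): a caller that temporarily wants fast
 * grace periods can bracket the fast path with these primitives, which
 * nest correctly:
 *
 *	rcu_expedite_gp();
 *	synchronize_rcu();	(behaves like synchronize_rcu_expedited())
 *	rcu_unexpedite_gp();
 *
 * This is also how boot works: rcu_expedited_nesting starts at 1, and
 * rcu_end_inkernel_boot() below supplies the matching
 * rcu_unexpedite_gp().
 */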

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
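
/*
 * Example (illustrative sketch; "gp" and its structure are
 * hypothetical): a preemptible-RCU reader simply brackets its accesses,
 * and may be preempted inside the critical section without harm:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * Under CONFIG_PREEMPT_RCU, these calls map onto __rcu_read_lock() and
 * __rcu_read_unlock() above by way of ->rcu_read_lock_nesting.
 */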

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if
 * enabled) will flag the situation.  This is useful for debug checks in
 * functions that require that they be called within an RCU-bh read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
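
/*
 * Example (illustrative; "gp" and "my_lock" are hypothetical): these
 * predicates are typically consumed via rcu_dereference_check(), which
 * permits access by either readers or the update-side lock holder:
 *
 *	p = rcu_dereference_check(gp,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&my_lock));
 *
 * Lockdep then complains only if neither condition can be shown to hold.
 */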

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
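
/*
 * Example (illustrative): __wait_rcu_gp() is normally reached via the
 * wait_rcu_gp() and synchronize_rcu_mult() wrappers in
 * linux/rcupdate_wait.h, for instance to wait on two flavors at once:
 *
 *	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 *
 * This queues one callback per distinct flavor and then sleeps until
 * all of them have been invoked, exactly as the two loops above do.
 */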

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
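
/*
 * Example (illustrative): an on-stack rcu_head must be announced to
 * debugobjects, as __wait_rcu_gp() above does.  A minimal open-coded
 * equivalent of synchronize_rcu() looks like this:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 *
 * Skipping the init/destroy pair makes debugobjects complain about a
 * stack object it was never told about.
 */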

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
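
/*
 * Example (illustrative): because of the "rcupdate." MODULE_PARAM_PREFIX
 * above, the stall timeout can be set at boot time with
 * "rcupdate.rcu_cpu_stall_timeout=60" or at run time with:
 *
 *	echo 60 > /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout
 *
 * Values outside [3, 300] seconds are clamped by
 * rcu_jiffies_till_stall_check() above, consistent with the Kconfig
 * limits on CONFIG_RCU_CPU_STALL_TIMEOUT.
 */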

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context
 * switch, user-space execution, and idle.  As such, grace periods can
 * take one good long time.  There are no read-side primitives similar
 * to rcu_read_lock() and rcu_read_unlock() because this implementation
 * is intended to get the system into a safe state for some of the
 * manipulations involved in tracing and the like.  Finally, this
 * implementation does not support high call_rcu_tasks() rates from
 * multiple CPUs.  If this is required, per-CPU callback lists will be
 * needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static struct task_struct *rcu_tasks_kthread_ptr;

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a tasks-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
		wake_up(&rcu_tasks_cbs_wq);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
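
/*
 * Example (illustrative sketch; struct tramp, its "rh" member, and
 * free_trampoline() are hypothetical): a tracer that has unhooked a
 * trampoline can defer freeing it until no task can still be running
 * inside it:
 *
 *	static void tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct tramp *t = container_of(rhp, struct tramp, rh);
 *
 *		free_trampoline(t);
 *	}
 *
 *	call_rcu_tasks(&old_tramp->rh, tramp_free_cb);
 */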

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}
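
/*
 * Example (illustrative sketch; unregister_my_hook(), old_tramp, and
 * free_trampoline() are hypothetical): the synchronous variant suits
 * teardown-style paths:
 *
 *	unregister_my_hook();		(no new tasks can enter)
 *	synchronize_rcu_tasks();	(any stragglers have left)
 *	free_trampoline(old_tramp);
 *
 * A teardown path would additionally call rcu_barrier_tasks() to flush
 * any callbacks previously queued via call_rcu_tasks().
 */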

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed to have
		 * full memory barriers prior to them in the schedule()
		 * path, memory reordering on other CPUs could cause their
		 * RCU-tasks read-side critical sections to extend past the
		 * end of the grace period.  However, because these ->nvcsw
		 * updates are carried out with interrupts disabled, we can
		 * use synchronize_sched() to force the needed ordering on
		 * all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at core_initcall() time. */
static int __init rcu_spawn_tasks_kthread(void)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void)
{
	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	preempt_enable();
}

#endif /* #ifdef CONFIG_TASKS_RCU */
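
/*
 * Usage note (sketch of the caller, which lives in kernel/exit.c):
 * exit_tasks_rcu_start() and exit_tasks_rcu_finish() above bracket the
 * late portion of do_exit(), so that the synchronize_srcu() in
 * rcu_tasks_kthread() can wait for tasks that are in the process of
 * exiting:
 *
 *	void do_exit(long code)
 *	{
 *		...
 *		exit_tasks_rcu_start();
 *		... late exit processing ...
 *		exit_tasks_rcu_finish();
 *		...
 *		do_task_dead();
 *	}
 */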

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */
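
/*
 * Example (illustrative): the early-boot self tests above are enabled
 * from the kernel command line, one parameter per flavor:
 *
 *	rcupdate.rcu_self_test=1
 *	rcupdate.rcu_self_test_bh=1
 *	rcupdate.rcu_self_test_sched=1
 *
 * rcu_verify_early_boot_tests() then WARNs at late_initcall() time if
 * any of the corresponding callbacks failed to run.
 */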