/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

MODULE_ALIAS("rcupdate");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

module_param(rcu_expedited, int, 0);

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
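
/*
 * Illustrative note (not part of the original file): the two primitives
 * above underlie the usual reader-side pattern.  A minimal sketch, in which
 * the struct, global pointer, and field names are hypothetical:
 *
 *	rcu_read_lock();			// ends up in __rcu_read_lock()
 *	p = rcu_dereference(global_foo_ptr);	// fetch RCU-protected pointer
 *	if (p)
 *		do_something_with(p->field);	// safe until rcu_read_unlock()
 *	rcu_read_unlock();			// ends up in __rcu_read_unlock()
 *
 * Under CONFIG_PREEMPT_RCU the critical section may be preempted; if the
 * task blocks, ->rcu_read_unlock_special is set and the outermost
 * rcu_read_unlock() invokes rcu_read_unlock_special() to do the deferred
 * cleanup.
 */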

#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
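
/*
 * Illustrative note (not part of the original file): wait_rcu_gp() turns any
 * callback-posting primitive into a blocking grace-period wait by queueing
 * an on-stack rcu_head whose callback fires a completion.  For example,
 * synchronize_rcu_tasks() later in this file is simply
 * wait_rcu_gp(call_rcu_tasks).  A hypothetical flavor "foo" with its own
 * call_rcu_foo() primitive could do the same:
 *
 *	void synchronize_foo(void)
 *	{
 *		wait_rcu_gp(call_rcu_foo);	// block for one foo grace period
 *	}
 */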

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup.  We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;
	default:
		return 1;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_activate = rcuhead_fixup_activate,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
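
/*
 * Illustrative note (not part of the original file): a worked example of the
 * conversion above, assuming HZ = 1000 and rcu_cpu_stall_timeout left at a
 * Kconfig default of 21 seconds.  With CONFIG_PROVE_RCU=y:
 *
 *	21 * HZ + RCU_STALL_DELAY_DELTA = 21000 + 5000 = 26000 jiffies (26 s)
 *
 * Without CONFIG_PROVE_RCU the extra slack is 0, giving 21000 jiffies.
 * Values written to rcu_cpu_stall_timeout outside [3, 300] are clamped back
 * into that range before the conversion.
 */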

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Post an RCU-tasks callback. */
void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
{
	unsigned long flags;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
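
/*
 * Illustrative note (not part of the original file): call_rcu_tasks() uses
 * the classic tail-pointer enqueue above, so posting a callback is O(1) and
 * the single global list preserves posting order.  A minimal caller sketch,
 * in which struct foo, foo_free_cb(), and the embedded rcu_head name are
 * hypothetical:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// After unlinking fp so that no new references can be taken:
 *	call_rcu_tasks(&fp->rh, foo_free_cb);	// freed after a tasks GP
 */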

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	rcu_lockdep_assert(rcu_scheduler_active,
			   "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
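
/*
 * Illustrative note (not part of the original file): the intended use case
 * is tracing-style code that must not free or rewrite instructions that some
 * task might still be executing, for example a dynamically allocated
 * trampoline.  A hedged sketch, in which remove_trampoline_from_call_sites()
 * and free_trampoline() are hypothetical helpers:
 *
 *	remove_trampoline_from_call_sites(tr);	// no new entries into tr
 *	synchronize_rcu_tasks();		// every task has since passed a
 *						// voluntary context switch,
 *						// userspace, or idle
 *	free_trampoline(tr);			// no task can still be in tr
 */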

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}

/* See if the current task has stopped holding out, remove from list if so. */
static void check_holdout_task(struct task_struct *t)
{
	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
	    !ACCESS_ONCE(t->on_rq)) {
		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
		list_del_rcu(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
	}
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* FIXME: Add housekeeping affinity. */

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			schedule_timeout_interruptible(HZ);
			WARN_ON(signal_pending(current));
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && ACCESS_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
				ACCESS_ONCE(t->rcu_tasks_holdout) = true;
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		while (!list_empty(&rcu_tasks_holdouts)) {
			schedule_timeout_interruptible(HZ);
			WARN_ON(signal_pending(current));
			rcu_read_lock();
			list_for_each_entry_rcu(t, &rcu_tasks_holdouts,
						rcu_tasks_holdout_list)
				check_holdout_task(t);
			rcu_read_unlock();
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
	}
}

/* Spawn rcu_tasks_kthread() at boot time. */
static int __init rcu_spawn_tasks_kthread(void)
{
	struct task_struct __maybe_unused *t;

	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	return 0;
}
early_initcall(rcu_spawn_tasks_kthread);

#endif /* #ifdef CONFIG_TASKS_RCU */