// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
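/*
 * Because of the MODULE_PARAM_PREFIX definition above, the parameters
 * declared in this file are set from the kernel command line with an
 * "rcupdate." prefix.  An illustrative example (not a recommendation):
 *
 *	rcupdate.rcu_expedited=1 rcupdate.rcu_normal_after_boot=1
 *
 * This expedites grace periods during boot and then returns to normal
 * (non-expedited) behavior once in-kernel boot has completed.
 */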
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * report an extended quiescent state to other CPUs that have started a
 * grace period.  Otherwise we would delay any grace period as long as we
 * run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_set_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */
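/*
 * Illustrative sketch, not used in this file: rcu_expedite_gp() and
 * rcu_unexpedite_gp() nest, so a latency-critical region can temporarily
 * force expedited grace periods and then restore the previous behavior:
 *
 *	rcu_expedite_gp();
 *	... code whose synchronize_rcu() calls must complete quickly ...
 *	rcu_unexpedite_gp();
 *
 * The ATOMIC_INIT(1) initialization of rcu_expedited_nesting above
 * provides one such nested expedite during early boot, which
 * rcu_end_inkernel_boot() undoes at the end of the in-kernel boot sequence.
 */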
/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
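/*
 * Illustrative sketch, not used in this file: lockdep-based checks such
 * as rcu_read_lock_held() are typically consumed through lockdep
 * assertions, for example via rcu_dereference_check().  Assuming a
 * hypothetical RCU-protected pointer gp that may instead be accessed
 * while holding a hypothetical mylock:
 *
 *	p = rcu_dereference_check(gp, lockdep_is_held(&mylock));
 *
 * Under CONFIG_PROVE_RCU this splats only when the caller holds neither
 * rcu_read_lock() nor mylock, because the supplied condition is ORed
 * with rcu_read_lock_held().
 */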
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
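/*
 * Illustrative sketch, not part of this file: the wait_rcu_gp() wrapper
 * (see include/linux/rcupdate_wait.h) funnels into __wait_rcu_gp() above,
 * which amounts to the classic open-coded way of waiting for a grace
 * period from sleepable context:
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_rcu_head_on_stack(&rcu.head);
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 *	destroy_rcu_head_on_stack(&rcu.head);
 *
 * synchronize_rcu_tasks() later in this file uses exactly this machinery
 * via wait_rcu_gp(call_rcu_tasks).
 */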
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be destroyed
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context
 * switch, cond_resched_rcu_qs(), user-space execution, and idle.
 * As such, grace periods can take one good long time.  There are no
 * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 * because this implementation is intended to get the system into a safe
 * state for some of the manipulations involved in tracing and the like.
 * Finally, this implementation does not support high call_rcu_tasks()
 * rates from multiple CPUs.  If this is required, per-CPU callback lists
 * will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static struct task_struct *rcu_tasks_kthread_ptr;

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a Tasks-RCU grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
		wake_up(&rcu_tasks_cbs_wq);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
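/*
 * Illustrative sketch, not part of this file: a typical Tasks-RCU user,
 * such as function-tracing code managing dynamically allocated
 * trampolines, first removes all ways for tasks to enter the trampoline,
 * then waits for every task to pass through a voluntary context switch,
 * idle, or usermode execution, and only then frees the memory.  The
 * helpers below are hypothetical:
 *
 *	unregister_trampoline(tr);	// no new entries into tr
 *	synchronize_rcu_tasks();	// every task has reached a safe state
 *	free_trampoline(tr);		// no task can still be executing in tr
 */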
/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to do so if, for example, there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);
	int fract;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_rcu()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_rcu(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_rcu() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_rcu();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_rcu() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;

		/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
		fract = 10;

		for (;;) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			if (list_empty(&rcu_tasks_holdouts))
				break;

			/* Slowly back off waiting for holdouts */
			schedule_timeout_interruptible(HZ/fract);

			if (fract > 1)
				fract--;

			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_rcu()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_rcu() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_rcu() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_rcu();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop. */
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at core_initcall() time. */
static int __init rcu_spawn_tasks_kthread(void)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void)
{
	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	preempt_enable();
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU))
		call_srcu(&early_srcu, &shead, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */