/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering them to be in an extended
 * quiescent state, so such a CPU is effectively never in an RCU read-side
 * critical section regardless of what RCU primitives it invokes.  This
 * state of affairs is required --- we need to keep an RCU-free window in
 * idle where the CPU may possibly enter into low power mode. This way we
 * can announce an extended quiescent state to CPUs that have started a
 * grace period. Otherwise we would delay any grace period for as long as
 * we run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the interval of boot between when
 * the first task is spawned and when the rcu_set_runtime_mode()
 * core_initcall() is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
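
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * rcu_expedite_gp() and rcu_unexpedite_gp() nest, so a caller that
 * temporarily needs fast grace periods brackets the region with a
 * matched pair:
 *
 *	void hypothetical_reconfigure(void)
 *	{
 *		rcu_expedite_gp();	// force expedited grace periods
 *		synchronize_rcu();	// acts like synchronize_rcu_expedited()
 *		rcu_unexpedite_gp();	// restore previous behavior
 *	}
 *
 * Because the nesting count is an atomic_t, concurrent callers can
 * overlap safely: grace periods remain expedited until every caller
 * has undone its rcu_expedite_gp().
 */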

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context: for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
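
/*
 * A minimal sketch of the intended use (hypothetical helper, not part
 * of this file): a function that must run inside an RCU read-side
 * critical section can assert as much with RCU_LOCKDEP_WARN(), which
 * compiles to nothing unless CONFIG_PROVE_RCU is enabled:
 *
 *	struct foo *hypothetical_lookup(struct list_head *head)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *				 "hypothetical_lookup() needs rcu_read_lock()");
 *		return list_first_or_null_rcu(head, struct foo, list);
 *	}
 */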

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will flag the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
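
/*
 * A minimal sketch of how these pieces compose (illustrative only):
 * the wait_rcu_gp() macro in include/linux/rcupdate_wait.h wraps
 * __wait_rcu_gp() with an on-stack rcu_synchronize per grace-period
 * flavor.  A synchronous wait for one ordinary grace period therefore
 * boils down to:
 *
 *	struct rcu_synchronize rs;
 *	call_rcu_func_t crf = call_rcu;
 *
 *	__wait_rcu_gp(false, 1, &crf, &rs);	// queue wakeme_after_rcu(),
 *						// then block on rs.completion
 */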

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that was previously initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
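
/*
 * A minimal sketch of the on-stack pattern these helpers support
 * (hypothetical function, not part of this file):
 *
 *	void hypothetical_sync_wait(void)
 *	{
 *		struct rcu_synchronize rcu;
 *
 *		init_rcu_head_on_stack(&rcu.head);
 *		init_completion(&rcu.completion);
 *		call_rcu(&rcu.head, wakeme_after_rcu);
 *		wait_for_completion(&rcu.completion);
 *		destroy_rcu_head_on_stack(&rcu.head);
 *	}
 *
 * Without the init/destroy calls, CONFIG_DEBUG_OBJECTS_RCU_HEAD would
 * complain about the on-stack rcu_head being an unknown object.
 */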

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	       0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
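
/*
 * Worked example (assuming CONFIG_HZ=1000 and the common
 * CONFIG_RCU_CPU_STALL_TIMEOUT default of 21 seconds): the function
 * returns 21000 jiffies, plus 5000 jiffies of RCU_STALL_DELAY_DELTA
 * slack under CONFIG_PROVE_RCU.  Out-of-range settings are clamped:
 * rcupdate.rcu_cpu_stall_timeout=1 behaves as 3 seconds, and 1000
 * behaves as 300.
 */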

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context
 * switch, cond_resched_rcu_qs(), user-space execution, and idle.
 * As such, grace periods can take one good long time.  There are no
 * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 * because this implementation is intended to get the system into a safe
 * state for some of the manipulations involved in tracing and the like.
 * Finally, this implementation does not support high call_rcu_tasks()
 * rates from multiple CPUs.  If this is required, per-CPU callback lists
 * will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static struct task_struct *rcu_tasks_kthread_ptr;

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
		wake_up(&rcu_tasks_cbs_wq);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
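
/*
 * A minimal usage sketch (hypothetical structure and callback, not part
 * of this file), typical of freeing a dynamically allocated trampoline
 * once no task can still be executing inside it:
 *
 *	struct hypothetical_tramp {
 *		struct rcu_head rh;
 *		char insns[64];
 *	};
 *
 *	static void hypothetical_free_tramp(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct hypothetical_tramp, rh));
 *	}
 *
 *	// After unlinking the trampoline so no new tasks can enter it:
 *	call_rcu_tasks(&tramp->rh, hypothetical_free_tramp);
 */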

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started.  */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
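
/*
 * A minimal sketch of the synchronous pattern (hypothetical helpers,
 * not part of this file): code that patches out a function preamble
 * can wait for every task to pass through a voluntary context switch
 * before reclaiming the old instructions:
 *
 *	hypothetical_unpatch(func);	// no new tasks enter the old code
 *	synchronize_rcu_tasks();	// wait out tasks already inside it
 *	hypothetical_free(old_code);	// now safe to reclaim
 */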

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to do so, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);
	int fract;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_rcu()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_rcu(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_rcu() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_rcu();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_rcu() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;

		/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
		fract = 10;

		for (;;) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			if (list_empty(&rcu_tasks_holdouts))
				break;

			/* Slowly back off waiting for holdouts */
			schedule_timeout_interruptible(HZ/fract);

			if (fract > 1)
				fract--;

			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed to
		 * be preceded by full memory barriers in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_rcu()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_rcu() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_rcu() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_rcu();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop. */
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at core_initcall() time. */
static int __init rcu_spawn_tasks_kthread(void)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
		return 0;
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	return 0;
}
core_initcall(rcu_spawn_tasks_kthread);

/* Do the srcu_read_lock() for the above synchronize_srcu().  */
void exit_tasks_rcu_start(void)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
void exit_tasks_rcu_finish(void)
{
	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	preempt_enable();
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU))
		call_srcu(&early_srcu, &shead, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */