xref: /openbmc/linux/kernel/rcu/update.c (revision 3e42ec1a)
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

MODULE_ALIAS("rcupdate");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);

static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
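
/*
 * Because of the "rcupdate." MODULE_PARAM_PREFIX above, these knobs
 * appear on the kernel command line as, for example:
 *
 *	rcupdate.rcu_expedited=1 rcupdate.rcu_normal_after_boot=1
 *
 * The first makes synchronize_rcu() and friends use expedited grace
 * periods; the second switches back to normal grace periods once the
 * in-kernel boot sequence completes (see rcu_end_inkernel_boot() below).
 */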

#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering them to be in an extended
 * quiescent state, so such a CPU is effectively never in an RCU read-side
 * critical section regardless of what RCU primitives it invokes.  This
 * state of affairs is required --- we need to keep an RCU-free window in
 * idle where the CPU may possibly enter into low power mode.  This way we
 * can report an extended quiescent state to other CPUs that have started
 * a grace period.  Otherwise we would delay any grace period as long as
 * we run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
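
/*
 * Example (illustrative only, not part of the original file): a function
 * that must be called with RCU-sched protection can assert as much via
 * lockdep.  The function and structure names here are hypothetical.
 *
 *	static int hypo_read_value(struct hypo_data *p)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *				 "hypo_read_value() needs rcu_read_lock_sched()!");
 *		return p->value;
 *	}
 */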
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal);
}

static atomic_t rcu_expedited_nesting =
	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable into account as well as the rcu_expedite_gp()
 * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
 * returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);

#endif /* #ifndef CONFIG_TINY_RCU */
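
/*
 * Example (illustrative only): rcu_expedite_gp() and rcu_unexpedite_gp()
 * nest, so code that temporarily needs fast grace periods brackets the
 * region with a matched pair, during which synchronize_rcu() behaves
 * like synchronize_rcu_expedited().  The function name is hypothetical.
 *
 *	static void hypo_fast_reconfig(void)
 *	{
 *		rcu_expedite_gp();
 *		synchronize_rcu();
 *		rcu_unexpedite_gp();
 *	}
 */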

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
		rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
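
/*
 * Example (illustrative only): __rcu_read_lock()/__rcu_read_unlock()
 * sit underneath rcu_read_lock()/rcu_read_unlock(), which readers pair
 * with rcu_dereference() in the usual way.  The structure and variable
 * names below are hypothetical.
 *
 *	struct hypo_conf {
 *		int threshold;
 *	};
 *	static struct hypo_conf __rcu *hypo_conf_p;
 *
 *	static int hypo_read_threshold(void)
 *	{
 *		struct hypo_conf *p;
 *		int ret = -1;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(hypo_conf_p);
 *		if (p)
 *			ret = p->threshold;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */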

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
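
/*
 * Example (illustrative only): rcu_read_lock_held() is typically used as
 * part of the condition passed to rcu_dereference_check(), so that lockdep
 * accepts both RCU readers and holders of an update-side lock.  The names
 * hypo_conf_p and hypo_conf_mutex are hypothetical.
 *
 *	p = rcu_dereference_check(hypo_conf_p,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&hypo_conf_mutex));
 */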

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU-bh read-side critical
 * section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
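
/*
 * Example (illustrative only): a softirq-context lookup pairs
 * rcu_read_lock_bh() with rcu_dereference_bh().  The names hypo_table_p
 * and hypo_count are hypothetical.
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(hypo_table_p);
 *	if (p)
 *		hypo_count += p->hits;
 *	rcu_read_unlock_bh();
 */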

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
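
/*
 * Note (illustrative, not from the original file): __wait_rcu_gp() is
 * normally reached through the wait_rcu_gp() wrapper, which posts a
 * callback for one RCU flavor and sleeps until that callback runs.
 * This is exactly how synchronize_rcu_tasks() below waits for a
 * tasks-RCU grace period:
 *
 *	wait_rcu_gp(call_rcu_tasks);
 *
 * The checktiny=true case presumably exists because TINY_RCU runs only
 * on !SMP && !PREEMPT kernels, where the mere ability to sleep already
 * implies the needed grace period, so call_rcu() and call_rcu_bh()
 * waits degenerate to might_sleep().
 */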

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;
	default:
		return 1;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
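
/*
 * Example (illustrative only): the canonical on-stack pattern, as used by
 * __wait_rcu_gp() above, brackets the rcu_head's lifetime with the
 * init/destroy pair:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */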

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_activate = rcuhead_fixup_activate,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	       0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
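
/*
 * Worked example: with the common Kconfig default of
 * rcu_cpu_stall_timeout=21 and HZ=1000, the stall check fires after
 * 21 * 1000 = 21000 jiffies (21 seconds), or after 26 seconds when
 * CONFIG_PROVE_RCU adds the 5 * HZ slack of RCU_STALL_DELAY_DELTA.
 * Values outside [3, 300] seconds are clamped to that range.  (The
 * default of 21 is the usual CONFIG_RCU_CPU_STALL_TIMEOUT setting;
 * check your .config.)
 */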

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	if (needwake) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
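
/*
 * Example (illustrative only): a tracing-style user posts a callback to
 * free a structure once no task can still be executing within it.  The
 * names are hypothetical; struct hypo_tramp is assumed to embed a
 * struct rcu_head named "rh".
 *
 *	static void hypo_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct hypo_tramp, rh));
 *	}
 *
 *	call_rcu_tasks(&tp->rh, hypo_tramp_free_cb);
 */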

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started.  */
	RCU_LOCKDEP_WARN(!rcu_scheduler_active,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
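
/*
 * Example (illustrative only): the synchronous form suits teardown paths
 * such as tracing trampolines, where all entry points are removed first
 * and the memory freed only after no task can still be executing in it.
 * The names below are hypothetical.
 *
 *	hypo_unpatch_all_callsites(tp);
 *	synchronize_rcu_tasks();
 *	kfree(tp);
 */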

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	static struct task_struct *rcu_tasks_kthread_ptr;
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self-test parameters, one for each flavor.
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
}
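
/*
 * Example (illustrative only): booting a CONFIG_PROVE_RCU kernel with
 *
 *	rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1
 *
 * queues one test callback per selected flavor during early boot; the
 * matching rcu_barrier*() calls in rcu_verify_early_boot_tests() below
 * then confirm at late_initcall() time that each callback actually ran.
 */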

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */