/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @n_gps: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long n_gps;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),	\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}

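/*
 * For example, the "classic" flavor later in this file is created with:
 *
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks,
 *			 "RCU Tasks");
 *
 * which, via the stringized rt_name and the "%s_kthread" format in
 * rcu_spawn_tasks_kthread_generic() below, names that flavor's
 * grace-period kthread "rcu_tasks_kthread".
 */
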
/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
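
/*
 * Both parameters above can also be set at boot time (see
 * Documentation/admin-guide/kernel-parameters.txt), for example with the
 * illustrative value below, which replaces the default ten-minute
 * (HZ * 60 * 10) stall timeout with a thirty-second one on an HZ=1000
 * system:
 *
 *	rcupdate.rcu_task_stall_timeout=30000
 */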

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}
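
/*
 * Each flavor then wraps this generic enqueue function in a trivial
 * flavor-specific function, as call_rcu_tasks() does later in this file:
 *
 *	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 *	{
 *		call_rcu_tasks_generic(rhp, func, &rcu_tasks);
 *	}
 */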

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started.  */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadmins can move it if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rtp->gp_func(rtp);
		rtp->n_gps++;

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rtp->n_gps),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU
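
/*
 * An illustrative (made-up) line from the function above might read:
 *
 *	rcu_tasks: RTGS_WAIT_CBS(11) since 250 g:17 i:0/0 k.
 *
 * that is, the "rcu_tasks" flavor has been in state RTGS_WAIT_CBS for
 * 250 jiffies, has completed 17 grace periods, has sent no IPIs (and
 * thus had no IPI failures), has a running kthread ('k'), and has no
 * callbacks queued ('.').
 */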

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		bool firstreport;
		bool needreport;
		int rtst;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(fract);

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

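/*
 * As a worked example of the backoff above: with CONFIG_HZ=1000 and an
 * ->init_fract of HZ / 10, successive holdout scans are separated by
 * sleeps of 100, 101, 102, ... jiffies, with the interval capped at HZ
 * (one second) for long-running grace periods.
 */
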
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period via
// the ->cbs_lock and the smp_mb__after_spinlock().  Pre-grace-period
// read-side code is ordered before the grace period via the
// synchronize_rcu() call in rcu_tasks_pregp_step() and by the scheduler's
// locks and interrupt disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the task list and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
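
/*
 * A minimal usage sketch with hypothetical names (struct my_tramp,
 * my_tramp_free_cb(), and the tramp pointer are illustrative only, not
 * part of this file): once a dynamically allocated trampoline has been
 * unlinked so that no new tasks can enter it, free it only after a
 * Tasks-RCU grace period:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		// Executable trampoline text goes here.
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_tramp, rh));
 *	}
 *
 *	call_rcu_tasks(&tramp->rh, my_tramp_free_cb);
 */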

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
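
/*
 * The synchronous counterpart of the earlier sketch (again with
 * hypothetical names): unlink the trampoline, wait for a full Tasks-RCU
 * grace period, and only then free it:
 *
 *	unregister_my_trampoline(tramp);
 *	synchronize_rcu_tasks();
 *	kfree(tramp);
 */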

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu().  */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}
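
/*
 * For example, on a system with eight CPUs online, each rude grace
 * period above adds eight to ->n_ipis and forces one context switch on
 * each of those eight CPUs, which is what makes this variant "rude".
 */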

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable). As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Initialize the count of readers and block CPU-hotplug operations.
// rcu_tasks_trace_pertask(), invoked on every non-idle task:
//	Initialize per-task state and attempt to identify an immediate
//	quiescent state for that task, or, failing that, attempt to
//	set that task's .need_qs flag so that task's next outermost
//	rcu_read_unlock_trace() will report the quiescent state (in which
//	case the count of readers is incremented).  If both attempts fail,
//	the task is added to a "holdout" list.  Note that IPIs are used
//	to invoke trc_read_check_handler() in the context of running tasks
//	in order to avoid ordering overhead on common-case shared-variable
//	accesses.
// rcu_tasks_trace_postscan():
//	Initialize state and attempt to identify an immediate quiescent
//	state as above (but only for idle tasks), unblock CPU-hotplug
//	operations, and wait for an RCU grace period to avoid races with
//	tasks that are in the process of exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_trace_postgp():
//	Wait for the count of readers to drop to zero, reporting any stalls.
//	Also execute full memory barriers to maintain ordering with code
//	executing after the grace period.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace
// period via the ->cbs_lock and barriers in rcu_tasks_kthread().
// Pre-grace-period read-side code is ordered before the grace period by
// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
// scheduler context-switch ordering (for locked-down non-running readers).

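// A minimal read-side sketch (my_hook and my_hook_data are hypothetical
// names, not part of this file):
//
//	rcu_read_lock_trace();
//	hook = rcu_dereference(my_hook);
//	if (hook)
//		hook(my_hook_data);
//	rcu_read_unlock_trace();
//
// The matching update side unpublishes my_hook and then invokes
// synchronize_rcu_tasks_trace() (or call_rcu_tasks_trace()) before
// freeing anything the hook might still be using.
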
// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
		goto reset_ipi;
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task.  */
static int trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently running.
		// However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	t->trc_reader_checked = nesting >= 0;
	if (nesting <= 0)
		return nesting ? -EINVAL : 0;  // If in QS, done, otherwise try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return 0;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.

	// If currently running, send an IPI; either way, add to the list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs. Just return.
	if (unlikely(t == NULL))
		return;

	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
	cpu = task_cpu(t);
	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
		 t->pid,
		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0],
		 ".i"[is_idle_task(t)],
		 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
		 READ_ONCE(t->trc_reader_nesting),
		 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
		 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order.  If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	// Remove the safety count.
	smp_mb__before_atomic();  // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic();  // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break;  // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t);
}

/**
 * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
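
/*
 * A minimal update-side sketch matching the read-side sketch earlier in
 * this file (struct my_hook_struct, my_hook, and my_hook_free_cb() are
 * hypothetical names): unpublish a hook and free it only after all trace
 * readers that might have seen it are done:
 *
 *	static void my_hook_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_hook_struct, rh));
 *	}
 *
 *	old_hook = my_hook;
 *	rcu_assign_pointer(my_hook, NULL);
 *	call_rcu_tasks_trace(&old_hook->rh, my_hook_free_cb);
 */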

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have elapsed. These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = true;
}

static void rcu_tasks_initiate_self_tests(void)
{
	pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!tests[i].notrun) {		// still hanging.
			pr_err("%s has failed.\n", tests[i].name);
			ret = -1;
		}
	}

	if (ret)
		WARN_ON(1);

	return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */