xref: /openbmc/linux/kernel/rcu/tasks.h (revision a9d85efb)
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * Task-based RCU implementations.
4  *
5  * Copyright (C) 2020 Paul E. McKenney
6  */
7 
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 
10 ////////////////////////////////////////////////////////////////////////
11 //
12 // Generic data structures.
13 
14 struct rcu_tasks;
15 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
16 typedef void (*pregp_func_t)(void);
17 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
18 typedef void (*postscan_func_t)(struct list_head *hop);
19 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
20 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
21 
22 /**
23  * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
24  * @cbs_head: Head of callback list.
25  * @cbs_tail: Tail pointer for callback list.
26  * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
27  * @cbs_lock: Lock protecting callback list.
28  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
29  * @gp_func: This flavor's grace-period-wait function.
30  * @gp_state: Grace period's most recent state transition (debugging).
31  * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
32  * @init_fract: Initial backoff sleep interval.
33  * @gp_jiffies: Time of last @gp_state transition.
34  * @gp_start: Most recent grace-period start in jiffies.
35  * @n_gps: Number of grace periods completed since boot.
36  * @n_ipis: Number of IPIs sent to encourage grace periods to end.
37  * @n_ipis_fails: Number of IPI-send failures.
38  * @pregp_func: This flavor's pre-grace-period function (optional).
39  * @pertask_func: This flavor's per-task scan function (optional).
40  * @postscan_func: This flavor's post-task scan function (optional).
41  * @holdouts_func: This flavor's holdout-list scan function (optional).
42  * @postgp_func: This flavor's post-grace-period function (optional).
43  * @call_func: This flavor's call_rcu()-equivalent function.
44  * @name: This flavor's textual name.
45  * @kname: This flavor's kthread name.
46  */
47 struct rcu_tasks {
48 	struct rcu_head *cbs_head;
49 	struct rcu_head **cbs_tail;
50 	struct wait_queue_head cbs_wq;
51 	raw_spinlock_t cbs_lock;
52 	int gp_state;
53 	int gp_sleep;
54 	int init_fract;
55 	unsigned long gp_jiffies;
56 	unsigned long gp_start;
57 	unsigned long n_gps;
58 	unsigned long n_ipis;
59 	unsigned long n_ipis_fails;
60 	struct task_struct *kthread_ptr;
61 	rcu_tasks_gp_func_t gp_func;
62 	pregp_func_t pregp_func;
63 	pertask_func_t pertask_func;
64 	postscan_func_t postscan_func;
65 	holdouts_func_t holdouts_func;
66 	postgp_func_t postgp_func;
67 	call_rcu_func_t call_func;
68 	char *name;
69 	char *kname;
70 };
71 
72 #define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
73 static struct rcu_tasks rt_name =					\
74 {									\
75 	.cbs_tail = &rt_name.cbs_head,					\
76 	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
77 	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),		\
78 	.gp_func = gp,							\
79 	.call_func = call,						\
80 	.name = n,							\
81 	.kname = #rt_name,						\
82 }
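
/*
 * For illustration only (editor's sketch, not part of this file): the
 * DEFINE_RCU_TASKS() invocation used below by the classic flavor,
 *
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * expands to roughly the following static definition:
 *
 *	static struct rcu_tasks rcu_tasks = {
 *		.cbs_tail  = &rcu_tasks.cbs_head,
 *		.cbs_wq    = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_tasks.cbs_wq),
 *		.cbs_lock  = __RAW_SPIN_LOCK_UNLOCKED(rcu_tasks.cbs_lock),
 *		.gp_func   = rcu_tasks_wait_gp,
 *		.call_func = call_rcu_tasks,
 *		.name      = "RCU Tasks",
 *		.kname     = "rcu_tasks",
 *	};
 */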
83 
84 /* Track exiting tasks in order to allow them to be waited for. */
85 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
86 
87 /* Avoid IPIing CPUs early in the grace period. */
88 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
89 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
90 module_param(rcu_task_ipi_delay, int, 0644);
91 
92 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
93 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
94 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
95 module_param(rcu_task_stall_timeout, int, 0644);
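
/*
 * Hedged usage note (editor's addition): both parameters above use mode 0644,
 * and this file is normally included from kernel/rcu/update.c, which sets
 * MODULE_PARAM_PREFIX to "rcupdate.".  Assuming that usual build layout, the
 * stall timeout (in jiffies) can be set at boot or adjusted at run time:
 *
 *	rcupdate.rcu_task_stall_timeout=3000			(boot parameter)
 *	echo 3000 > /sys/module/rcupdate/parameters/rcu_task_stall_timeout
 */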
96 
97 /* RCU tasks grace-period state for debugging. */
98 #define RTGS_INIT		 0
99 #define RTGS_WAIT_WAIT_CBS	 1
100 #define RTGS_WAIT_GP		 2
101 #define RTGS_PRE_WAIT_GP	 3
102 #define RTGS_SCAN_TASKLIST	 4
103 #define RTGS_POST_SCAN_TASKLIST	 5
104 #define RTGS_WAIT_SCAN_HOLDOUTS	 6
105 #define RTGS_SCAN_HOLDOUTS	 7
106 #define RTGS_POST_GP		 8
107 #define RTGS_WAIT_READERS	 9
108 #define RTGS_INVOKE_CBS		10
109 #define RTGS_WAIT_CBS		11
110 #ifndef CONFIG_TINY_RCU
111 static const char * const rcu_tasks_gp_state_names[] = {
112 	"RTGS_INIT",
113 	"RTGS_WAIT_WAIT_CBS",
114 	"RTGS_WAIT_GP",
115 	"RTGS_PRE_WAIT_GP",
116 	"RTGS_SCAN_TASKLIST",
117 	"RTGS_POST_SCAN_TASKLIST",
118 	"RTGS_WAIT_SCAN_HOLDOUTS",
119 	"RTGS_SCAN_HOLDOUTS",
120 	"RTGS_POST_GP",
121 	"RTGS_WAIT_READERS",
122 	"RTGS_INVOKE_CBS",
123 	"RTGS_WAIT_CBS",
124 };
125 #endif /* #ifndef CONFIG_TINY_RCU */
126 
127 ////////////////////////////////////////////////////////////////////////
128 //
129 // Generic code.
130 
131 /* Record grace-period phase and time. */
132 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
133 {
134 	rtp->gp_state = newstate;
135 	rtp->gp_jiffies = jiffies;
136 }
137 
138 #ifndef CONFIG_TINY_RCU
139 /* Return state name. */
140 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
141 {
142 	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
143 	int j = READ_ONCE(i); // Prevent the compiler from reading twice
144 
145 	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
146 		return "???";
147 	return rcu_tasks_gp_state_names[j];
148 }
149 #endif /* #ifndef CONFIG_TINY_RCU */
150 
151 // Enqueue a callback for the specified flavor of Tasks RCU.
152 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
153 				   struct rcu_tasks *rtp)
154 {
155 	unsigned long flags;
156 	bool needwake;
157 
158 	rhp->next = NULL;
159 	rhp->func = func;
160 	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
161 	needwake = !rtp->cbs_head;
162 	WRITE_ONCE(*rtp->cbs_tail, rhp);
163 	rtp->cbs_tail = &rhp->next;
164 	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
165 	/* We can't create the thread unless interrupts are enabled. */
166 	if (needwake && READ_ONCE(rtp->kthread_ptr))
167 		wake_up(&rtp->cbs_wq);
168 }
169 
170 // Wait for a grace period for the specified flavor of Tasks RCU.
171 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
172 {
173 	/* Complain if the scheduler has not started.  */
174 	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
175 			 "synchronize_rcu_tasks called too soon");
176 
177 	/* Wait for the grace period. */
178 	wait_rcu_gp(rtp->call_func);
179 }
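
/*
 * Editor's illustrative sketch (not code from this file) of what wait_rcu_gp()
 * does with the flavor's ->call_func: it queues a callback whose only job is
 * to signal a completion, then blocks until that callback is invoked after a
 * full grace period:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	rtp->call_func(&rs.head, wakeme_after_rcu);	// e.g., call_rcu_tasks()
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */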
180 
181 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
182 static int __noreturn rcu_tasks_kthread(void *arg)
183 {
184 	unsigned long flags;
185 	struct rcu_head *list;
186 	struct rcu_head *next;
187 	struct rcu_tasks *rtp = arg;
188 
189 	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
190 	housekeeping_affine(current, HK_FLAG_RCU);
191 	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
192 
193 	/*
194 	 * Each pass through the following loop makes one check for
195 	 * newly arrived callbacks, and, if there are some, waits for
196 	 * one RCU-tasks grace period and then invokes the callbacks.
197 	 * This loop is terminated by the system going down.  ;-)
198 	 */
199 	for (;;) {
200 
201 		/* Pick up any new callbacks. */
202 		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
203 		smp_mb__after_spinlock(); // Order updates vs. GP.
204 		list = rtp->cbs_head;
205 		rtp->cbs_head = NULL;
206 		rtp->cbs_tail = &rtp->cbs_head;
207 		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
208 
209 		/* If there were none, wait a bit and start over. */
210 		if (!list) {
211 			wait_event_interruptible(rtp->cbs_wq,
212 						 READ_ONCE(rtp->cbs_head));
213 			if (!rtp->cbs_head) {
214 				WARN_ON(signal_pending(current));
215 				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
216 				schedule_timeout_idle(HZ/10);
217 			}
218 			continue;
219 		}
220 
221 		// Wait for one grace period.
222 		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
223 		rtp->gp_start = jiffies;
224 		rtp->gp_func(rtp);
225 		rtp->n_gps++;
226 
227 		/* Invoke the callbacks. */
228 		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
229 		while (list) {
230 			next = list->next;
231 			local_bh_disable();
232 			list->func(list);
233 			local_bh_enable();
234 			list = next;
235 			cond_resched();
236 		}
237 		/* Paranoid sleep to keep this from entering a tight loop */
238 		schedule_timeout_idle(rtp->gp_sleep);
239 
240 		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
241 	}
242 }
243 
244 /* Spawn RCU-tasks grace-period kthread. */
245 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
246 {
247 	struct task_struct *t;
248 
249 	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
250 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
251 		return;
252 	smp_mb(); /* Ensure others see full kthread. */
253 }
254 
255 #ifndef CONFIG_TINY_RCU
256 
257 /*
258  * Print any non-default Tasks RCU settings.
259  */
260 static void __init rcu_tasks_bootup_oddness(void)
261 {
262 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
263 	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
264 		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
265 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
266 #ifdef CONFIG_TASKS_RCU
267 	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
268 #endif /* #ifdef CONFIG_TASKS_RCU */
269 #ifdef CONFIG_TASKS_RUDE_RCU
270 	pr_info("\tRude variant of Tasks RCU enabled.\n");
271 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
272 #ifdef CONFIG_TASKS_TRACE_RCU
273 	pr_info("\tTracing variant of Tasks RCU enabled.\n");
274 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
275 }
276 
277 #endif /* #ifndef CONFIG_TINY_RCU */
278 
279 #ifndef CONFIG_TINY_RCU
280 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
281 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
282 {
283 	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
284 		rtp->kname,
285 		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
286 		jiffies - data_race(rtp->gp_jiffies),
287 		data_race(rtp->n_gps),
288 		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
289 		".k"[!!data_race(rtp->kthread_ptr)],
290 		".C"[!!data_race(rtp->cbs_head)],
291 		s);
292 }
293 #endif // #ifndef CONFIG_TINY_RCU
294 
295 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
296 
297 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
298 
299 ////////////////////////////////////////////////////////////////////////
300 //
301 // Shared code between task-list-scanning variants of Tasks RCU.
302 
303 /* Wait for one RCU-tasks grace period. */
304 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
305 {
306 	struct task_struct *g, *t;
307 	unsigned long lastreport;
308 	LIST_HEAD(holdouts);
309 	int fract;
310 
311 	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
312 	rtp->pregp_func();
313 
314 	/*
315 	 * There were callbacks, so we need to wait for an RCU-tasks
316 	 * grace period.  Start off by scanning the task list for tasks
317 	 * that are not already voluntarily blocked.  Mark these tasks
318 	 * and make a list of them in holdouts.
319 	 */
320 	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
321 	rcu_read_lock();
322 	for_each_process_thread(g, t)
323 		rtp->pertask_func(t, &holdouts);
324 	rcu_read_unlock();
325 
326 	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
327 	rtp->postscan_func(&holdouts);
328 
329 	/*
330 	 * Each pass through the following loop scans the list of holdout
331 	 * tasks, removing any that are no longer holdouts.  When the list
332 	 * is empty, we are done.
333 	 */
334 	lastreport = jiffies;
335 
336 	// Start off with initial wait and slowly back off to 1 HZ wait.
337 	fract = rtp->init_fract;
338 
339 	while (!list_empty(&holdouts)) {
340 		bool firstreport;
341 		bool needreport;
342 		int rtst;
343 
344 		/* Slowly back off waiting for holdouts */
345 		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
346 		schedule_timeout_idle(fract);
347 
348 		if (fract < HZ)
349 			fract++;
350 
351 		rtst = READ_ONCE(rcu_task_stall_timeout);
352 		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
353 		if (needreport)
354 			lastreport = jiffies;
355 		firstreport = true;
356 		WARN_ON(signal_pending(current));
357 		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
358 		rtp->holdouts_func(&holdouts, needreport, &firstreport);
359 	}
360 
361 	set_tasks_gp_state(rtp, RTGS_POST_GP);
362 	rtp->postgp_func(rtp);
363 }
364 
365 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
366 
367 #ifdef CONFIG_TASKS_RCU
368 
369 ////////////////////////////////////////////////////////////////////////
370 //
371 // Simple variant of RCU whose quiescent states are voluntary context
372 // switch, cond_resched_rcu_qs(), user-space execution, and idle.
373 // As such, grace periods can take one good long time.  There are no
374 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
375 // because this implementation is intended to get the system into a safe
376 // state for some of the manipulations involved in tracing and the like.
377 // Finally, this implementation does not support high call_rcu_tasks()
378 // rates from multiple CPUs.  If this is required, per-CPU callback lists
379 // will be needed.
380 //
381 // The implementation uses rcu_tasks_wait_gp(), which relies on function
382 // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
383 // function sets these function pointers up so that rcu_tasks_wait_gp()
384 // invokes these functions in this order:
385 //
386 // rcu_tasks_pregp_step():
387 //	Invokes synchronize_rcu() in order to wait for all in-flight
388 //	t->on_rq and t->nvcsw transitions to complete.	This works because
389 //	all such transitions are carried out with interrupts disabled.
390 // rcu_tasks_pertask(), invoked on every non-idle task:
391 //	For every runnable non-idle task other than the current one, use
392 //	get_task_struct() to pin down that task, snapshot that task's
393 //	number of voluntary context switches, and add that task to the
394 //	holdout list.
395 // rcu_tasks_postscan():
396 //	Invoke synchronize_srcu() to ensure that all tasks that were
397 //	in the process of exiting (and which thus might not know to
398 //	synchronize with this RCU Tasks grace period) have completed
399 //	exiting.
400 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
401 //	Scans the holdout list, attempting to identify a quiescent state
402 //	for each task on the list.  If there is a quiescent state, the
403 //	corresponding task is removed from the holdout list.
404 // rcu_tasks_postgp():
405 //	Invokes synchronize_rcu() in order to ensure that all prior
406 //	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
407 //	to have happened before the end of this RCU Tasks grace period.
408 //	Again, this works because all such transitions are carried out
409 //	with interrupts disabled.
410 //
411 // For each exiting task, the exit_tasks_rcu_start() and
412 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
413 // read-side critical sections waited for by rcu_tasks_postscan().
414 //
415 // Pre-grace-period update-side code is ordered before the grace period via the
416 // ->cbs_lock and the smp_mb__after_spinlock().  Pre-grace-period read-side
417 // code is ordered before the grace period via synchronize_rcu() call
418 // in rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
419 // disabling.
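//
// Editor's illustrative summary (an addition, not part of the original
// comment): with the function pointers installed by rcu_spawn_tasks_kthread()
// below, one classic Tasks-RCU grace period in rcu_tasks_wait_gp() therefore
// proceeds roughly as follows:
//
//	rcu_tasks_pregp_step();				// synchronize_rcu()
//	for_each_process_thread(g, t)
//		rcu_tasks_pertask(t, &holdouts);	// Build the holdout list.
//	rcu_tasks_postscan(&holdouts);			// synchronize_srcu()
//	while (!list_empty(&holdouts))
//		check_all_holdout_tasks(&holdouts, ...);	// Drain holdouts.
//	rcu_tasks_postgp(&rcu_tasks);			// synchronize_rcu()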
420 
421 /* Pre-grace-period preparation. */
422 static void rcu_tasks_pregp_step(void)
423 {
424 	/*
425 	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
426 	 * to complete.  Invoking synchronize_rcu() suffices because all
427 	 * these transitions occur with interrupts disabled.  Without this
428 	 * synchronize_rcu(), a read-side critical section that started
429 	 * before the grace period might be incorrectly seen as having
430 	 * started after the grace period.
431 	 *
432 	 * This synchronize_rcu() also dispenses with the need for a
433 	 * memory barrier on the first store to t->rcu_tasks_holdout,
434 	 * as it forces the store to happen after the beginning of the
435 	 * grace period.
436 	 */
437 	synchronize_rcu();
438 }
439 
440 /* Per-task initial processing. */
441 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
442 {
443 	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
444 		get_task_struct(t);
445 		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
446 		WRITE_ONCE(t->rcu_tasks_holdout, true);
447 		list_add(&t->rcu_tasks_holdout_list, hop);
448 	}
449 }
450 
451 /* Processing between scanning the task list and draining the holdout list. */
452 static void rcu_tasks_postscan(struct list_head *hop)
453 {
454 	/*
455 	 * Wait for tasks that are in the process of exiting.  This
456 	 * does only part of the job, ensuring that all tasks that were
457 	 * previously exiting reach the point where they have disabled
458 	 * preemption, allowing the later synchronize_rcu() to finish
459 	 * the job.
460 	 */
461 	synchronize_srcu(&tasks_rcu_exit_srcu);
462 }
463 
464 /* See if tasks are still holding out, complain if so. */
465 static void check_holdout_task(struct task_struct *t,
466 			       bool needreport, bool *firstreport)
467 {
468 	int cpu;
469 
470 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
471 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
472 	    !READ_ONCE(t->on_rq) ||
473 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
474 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
475 		WRITE_ONCE(t->rcu_tasks_holdout, false);
476 		list_del_init(&t->rcu_tasks_holdout_list);
477 		put_task_struct(t);
478 		return;
479 	}
480 	rcu_request_urgent_qs_task(t);
481 	if (!needreport)
482 		return;
483 	if (*firstreport) {
484 		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
485 		*firstreport = false;
486 	}
487 	cpu = task_cpu(t);
488 	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
489 		 t, ".I"[is_idle_task(t)],
490 		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
491 		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
492 		 t->rcu_tasks_idle_cpu, cpu);
493 	sched_show_task(t);
494 }
495 
496 /* Scan the holdout lists for tasks no longer holding out. */
497 static void check_all_holdout_tasks(struct list_head *hop,
498 				    bool needreport, bool *firstreport)
499 {
500 	struct task_struct *t, *t1;
501 
502 	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
503 		check_holdout_task(t, needreport, firstreport);
504 		cond_resched();
505 	}
506 }
507 
508 /* Finish off the Tasks-RCU grace period. */
509 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
510 {
511 	/*
512 	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
513 	 * memory barriers prior to them in the schedule() path, memory
514 	 * reordering on other CPUs could cause their RCU-tasks read-side
515 	 * critical sections to extend past the end of the grace period.
516 	 * However, because these ->nvcsw updates are carried out with
517 	 * interrupts disabled, we can use synchronize_rcu() to force the
518 	 * needed ordering on all such CPUs.
519 	 *
520 	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
521 	 * accesses to be within the grace period, avoiding the need for
522 	 * memory barriers for ->rcu_tasks_holdout accesses.
523 	 *
524 	 * In addition, this synchronize_rcu() waits for exiting tasks
525 	 * to complete their final preempt_disable() region of execution,
526 	 * cleaning up after the synchronize_srcu() above.
527 	 */
528 	synchronize_rcu();
529 }
530 
531 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
532 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
533 
534 /**
535  * call_rcu_tasks() - Queue an RCU callback for a task-based grace period
536  * @rhp: structure to be used for queueing the RCU updates.
537  * @func: actual callback function to be invoked after the grace period
538  *
539  * The callback function will be invoked some time after a full grace
540  * period elapses, in other words after all currently executing RCU
541  * read-side critical sections have completed. call_rcu_tasks() assumes
542  * that the read-side critical sections end at a voluntary context
543  * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
544  * or transition to usermode execution.  As such, there are no read-side
545  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
546  * this primitive is intended to determine that all tasks have passed
547  * through a safe state, not so much for data-structure synchronization.
548  *
549  * See the description of call_rcu() for more detailed information on
550  * memory ordering guarantees.
551  */
552 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
553 {
554 	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
555 }
556 EXPORT_SYMBOL_GPL(call_rcu_tasks);
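
/*
 * Hedged usage sketch (editor's addition; the my_tramp structure and helper
 * names below are hypothetical, not kernel APIs): a tracer that has unlinked
 * a dynamically allocated trampoline can use call_rcu_tasks() to defer
 * freeing it until no task can still be executing within it:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *text;			// Executable trampoline memory.
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		my_arch_tramp_free(tp->text);	// Hypothetical arch-specific free.
 *		kfree(tp);
 *	}
 *
 *	// After removing every way for a task to branch into tp->text:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */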
557 
558 /**
559  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
560  *
561  * Control will return to the caller some time after a full rcu-tasks
562  * grace period has elapsed, in other words after all currently
563  * executing rcu-tasks read-side critical sections have completed.  These
564  * read-side critical sections are delimited by calls to schedule(),
565  * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
566  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
567  *
568  * This is a very specialized primitive, intended only for a few uses in
569  * tracing and other situations requiring manipulation of function
570  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
571  * is not (yet) intended for heavy use from multiple CPUs.
572  *
573  * See the description of synchronize_rcu() for more detailed information
574  * on memory ordering guarantees.
575  */
576 void synchronize_rcu_tasks(void)
577 {
578 	synchronize_rcu_tasks_generic(&rcu_tasks);
579 }
580 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
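
/*
 * Hedged usage sketch (editor's addition; unregister_my_hook() and the hook
 * structure are hypothetical): a caller that can afford to block may wait for
 * the grace period inline rather than using call_rcu_tasks():
 *
 *	unregister_my_hook(hook);	// No new tasks can enter the trampoline.
 *	synchronize_rcu_tasks();	// Tasks already inside have now left.
 *	my_arch_tramp_free(hook->text);
 *	kfree(hook);
 */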
581 
582 /**
583  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
584  *
585  * Although the current implementation is guaranteed to wait, it is not
586  * obligated to, for example, if there are no pending callbacks.
587  */
588 void rcu_barrier_tasks(void)
589 {
590 	/* There is only one callback queue, so this is easy.  ;-) */
591 	synchronize_rcu_tasks();
592 }
593 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
594 
595 static int __init rcu_spawn_tasks_kthread(void)
596 {
597 	rcu_tasks.gp_sleep = HZ / 10;
598 	rcu_tasks.init_fract = HZ / 10;
599 	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
600 	rcu_tasks.pertask_func = rcu_tasks_pertask;
601 	rcu_tasks.postscan_func = rcu_tasks_postscan;
602 	rcu_tasks.holdouts_func = check_all_holdout_tasks;
603 	rcu_tasks.postgp_func = rcu_tasks_postgp;
604 	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
605 	return 0;
606 }
607 
608 #if !defined(CONFIG_TINY_RCU)
609 void show_rcu_tasks_classic_gp_kthread(void)
610 {
611 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
612 }
613 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
614 #endif // !defined(CONFIG_TINY_RCU)
615 
616 /* Do the srcu_read_lock() for the above synchronize_srcu().  */
617 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
618 {
619 	preempt_disable();
620 	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
621 	preempt_enable();
622 }
623 
624 /* Do the srcu_read_unlock() for the above synchronize_srcu().  */
625 void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
626 {
627 	struct task_struct *t = current;
628 
629 	preempt_disable();
630 	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
631 	preempt_enable();
632 	exit_tasks_rcu_finish_trace(t);
633 }
634 
635 #else /* #ifdef CONFIG_TASKS_RCU */
636 void exit_tasks_rcu_start(void) { }
637 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
638 #endif /* #else #ifdef CONFIG_TASKS_RCU */
639 
640 #ifdef CONFIG_TASKS_RUDE_RCU
641 
642 ////////////////////////////////////////////////////////////////////////
643 //
644 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
645 // passing an empty function to schedule_on_each_cpu().  This approach
646 // provides an asynchronous call_rcu_tasks_rude() API and batching of
647 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
648 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
649 // and induces otherwise unnecessary context switches on all online CPUs,
650 // whether idle or not.
651 //
652 // Callback handling is provided by the rcu_tasks_kthread() function.
653 //
654 // Ordering is provided by the scheduler's context-switch code.
655 
656 // Empty function to allow workqueues to force a context switch.
657 static void rcu_tasks_be_rude(struct work_struct *work)
658 {
659 }
660 
661 // Wait for one rude RCU-tasks grace period.
662 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
663 {
664 	rtp->n_ipis += cpumask_weight(cpu_online_mask);
665 	schedule_on_each_cpu(rcu_tasks_be_rude);
666 }
667 
668 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
669 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
670 		 "RCU Tasks Rude");
671 
672 /**
673  * call_rcu_tasks_rude() - Queue an RCU callback for a rude task-based grace period
674  * @rhp: structure to be used for queueing the RCU updates.
675  * @func: actual callback function to be invoked after the grace period
676  *
677  * The callback function will be invoked some time after a full grace
678  * period elapses, in other words after all currently executing RCU
679  * read-side critical sections have completed. call_rcu_tasks_rude()
680  * assumes that the read-side critical sections end at context switch,
681  * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
682  * there are no read-side primitives analogous to rcu_read_lock() and
683  * rcu_read_unlock() because this primitive is intended to determine
684  * that all tasks have passed through a safe state, not so much for
685  * data-structure synchronization.
686  *
687  * See the description of call_rcu() for more detailed information on
688  * memory ordering guarantees.
689  */
690 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
691 {
692 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
693 }
694 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
695 
696 /**
697  * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
698  *
699  * Control will return to the caller some time after a rude rcu-tasks
700  * grace period has elapsed, in other words after all currently
701  * executing rcu-tasks read-side critical sections have completed.  These
702  * read-side critical sections are delimited by calls to schedule(),
703  * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
704  * anyway) cond_resched().
705  *
706  * This is a very specialized primitive, intended only for a few uses in
707  * tracing and other situations requiring manipulation of function preambles
708  * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
709  * (yet) intended for heavy use from multiple CPUs.
710  *
711  * See the description of synchronize_rcu() for more detailed information
712  * on memory ordering guarantees.
713  */
714 void synchronize_rcu_tasks_rude(void)
715 {
716 	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
717 }
718 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
719 
720 /**
721  * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
722  *
723  * Although the current implementation is guaranteed to wait, it is not
724  * obligated to, for example, if there are no pending callbacks.
725  */
726 void rcu_barrier_tasks_rude(void)
727 {
728 	/* There is only one callback queue, so this is easy.  ;-) */
729 	synchronize_rcu_tasks_rude();
730 }
731 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
732 
733 static int __init rcu_spawn_tasks_rude_kthread(void)
734 {
735 	rcu_tasks_rude.gp_sleep = HZ / 10;
736 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
737 	return 0;
738 }
739 
740 #if !defined(CONFIG_TINY_RCU)
741 void show_rcu_tasks_rude_gp_kthread(void)
742 {
743 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
744 }
745 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
746 #endif // !defined(CONFIG_TINY_RCU)
747 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
748 
749 ////////////////////////////////////////////////////////////////////////
750 //
751 // Tracing variant of Tasks RCU.  This variant is designed to be used
752 // to protect tracing hooks, including those of BPF.  This variant
753 // therefore:
754 //
755 // 1.	Has explicit read-side markers to allow finite grace periods
756 //	in the face of in-kernel loops for PREEMPT=n builds.
757 //
758 // 2.	Protects code in the idle loop, exception entry/exit, and
759 //	CPU-hotplug code paths, similar to the capabilities of SRCU.
760 //
761 // 3.	Avoids expensive read-side instructions, having overhead similar
762 //	to that of Preemptible RCU.
763 //
764 // There are of course downsides.  The grace-period code can send IPIs to
765 // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
766 // It is necessary to scan the full tasklist, much as for Tasks RCU.  There
767 // is a single callback queue guarded by a single lock, again, much as for
768 // Tasks RCU.  If needed, these downsides can be at least partially remedied.
769 //
770 // Perhaps most important, this variant of RCU does not affect the vanilla
771 // flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
772 // readers can operate from idle, offline, and exception entry/exit in no
773 // way allows rcu_preempt and rcu_sched readers to also do so.
774 //
775 // The implementation uses rcu_tasks_wait_gp(), which relies on function
776 // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
777 // function sets these function pointers up so that rcu_tasks_wait_gp()
778 // invokes these functions in this order:
779 //
780 // rcu_tasks_trace_pregp_step():
781 //	Initialize the count of readers and block CPU-hotplug operations.
782 // rcu_tasks_trace_pertask(), invoked on every non-idle task:
783 //	Initialize per-task state and attempt to identify an immediate
784 //	quiescent state for that task, or, failing that, attempt to
785 //	set that task's .need_qs flag so that task's next outermost
786 //	rcu_read_unlock_trace() will report the quiescent state (in which
787 //	case the count of readers is incremented).  If both attempts fail,
788 //	the task is added to a "holdout" list.  Note that IPIs are used
789 //	to invoke trc_read_check_handler() in the context of running tasks
790 //	in order to avoid ordering overhead on common-case shared-variable
791 //	accesses.
792 // rcu_tasks_trace_postscan():
793 //	Initialize state and attempt to identify an immediate quiescent
794 //	state as above (but only for idle tasks), unblock CPU-hotplug
795 //	operations, and wait for an RCU grace period to avoid races with
796 //	tasks that are in the process of exiting.
797 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
798 //	Scans the holdout list, attempting to identify a quiescent state
799 //	for each task on the list.  If there is a quiescent state, the
800 //	corresponding task is removed from the holdout list.
801 // rcu_tasks_trace_postgp():
802 //	Wait for the count of readers to drop to zero, reporting any stalls.
803 //	Also execute full memory barriers to maintain ordering with code
804 //	executing after the grace period.
805 //
806 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
807 //
808 // Pre-grace-period update-side code is ordered before the grace
809 // period via the ->cbs_lock and barriers in rcu_tasks_kthread().
810 // Pre-grace-period read-side code is ordered before the grace period by
811 // atomic_dec_and_test() of the count of readers (for IPIed readers) and by
812 // scheduler context-switch ordering (for locked-down non-running readers).
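
/*
 * Hedged reader/updater sketch (editor's addition; my_hook and my_data are
 * hypothetical): readers of this flavor bracket their accesses with
 * rcu_read_lock_trace() and rcu_read_unlock_trace() (declared in
 * <linux/rcupdate_trace.h>), and updaters wait with
 * synchronize_rcu_tasks_trace() or call_rcu_tasks_trace():
 *
 *	// Reader, for example a BPF-style tracing hook:
 *	rcu_read_lock_trace();
 *	data = rcu_dereference_raw(my_hook->data);
 *	if (data)
 *		my_hook->func(data);
 *	rcu_read_unlock_trace();
 *
 *	// Updater, after unpublishing the data:
 *	old_data = my_hook->data;
 *	WRITE_ONCE(my_hook->data, NULL);
 *	synchronize_rcu_tasks_trace();
 *	kfree(old_data);
 */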
813 
814 // The lockdep state must be outside of #ifdef to be useful.
815 #ifdef CONFIG_DEBUG_LOCK_ALLOC
816 static struct lock_class_key rcu_lock_trace_key;
817 struct lockdep_map rcu_trace_lock_map =
818 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
819 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
820 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
821 
822 #ifdef CONFIG_TASKS_TRACE_RCU
823 
824 static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
825 static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.
826 
827 // Record outstanding IPIs to each CPU.  No point in sending two...
828 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
829 
830 // The number of detections of task quiescent states relying on
831 // heavyweight readers executing explicit memory barriers.
832 static unsigned long n_heavy_reader_attempts;
833 static unsigned long n_heavy_reader_updates;
834 static unsigned long n_heavy_reader_ofl_updates;
835 
836 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
837 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
838 		 "RCU Tasks Trace");
839 
840 /*
841  * This irq_work handler allows rcu_read_unlock_trace() to be invoked
842  * while the scheduler locks are held.
843  */
844 static void rcu_read_unlock_iw(struct irq_work *iwp)
845 {
846 	wake_up(&trc_wait);
847 }
848 static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
849 
850 /* If we are the last reader, wake up the grace-period kthread. */
851 void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
852 {
853 	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
854 
855 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
856 	    t->trc_reader_special.b.need_mb)
857 		smp_mb(); // Pairs with update-side barriers.
858 	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
859 	if (nq)
860 		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
861 	WRITE_ONCE(t->trc_reader_nesting, nesting);
862 	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
863 		irq_work_queue(&rcu_tasks_trace_iw);
864 }
865 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
866 
867 /* Add a task to the holdout list, if it is not already on the list. */
868 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
869 {
870 	if (list_empty(&t->trc_holdout_list)) {
871 		get_task_struct(t);
872 		list_add(&t->trc_holdout_list, bhp);
873 	}
874 }
875 
876 /* Remove a task from the holdout list, if it is in fact present. */
877 static void trc_del_holdout(struct task_struct *t)
878 {
879 	if (!list_empty(&t->trc_holdout_list)) {
880 		list_del_init(&t->trc_holdout_list);
881 		put_task_struct(t);
882 	}
883 }
884 
885 /* IPI handler to check task state. */
886 static void trc_read_check_handler(void *t_in)
887 {
888 	struct task_struct *t = current;
889 	struct task_struct *texp = t_in;
890 
891 	// If the task is no longer running on this CPU, leave.
892 	if (unlikely(texp != t)) {
893 		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
894 			wake_up(&trc_wait);
895 		goto reset_ipi; // Already on holdout list, so will check later.
896 	}
897 
898 	// If the task is not in a read-side critical section, and
899 	// if this is the last reader, awaken the grace-period kthread.
900 	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
901 		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
902 			wake_up(&trc_wait);
903 		// Mark as checked after decrement to avoid false
904 		// positives on the above WARN_ON_ONCE().
905 		WRITE_ONCE(t->trc_reader_checked, true);
906 		goto reset_ipi;
907 	}
908 	// If we are racing with an rcu_read_unlock_trace(), try again later.
909 	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
910 		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
911 			wake_up(&trc_wait);
912 		goto reset_ipi;
913 	}
914 	WRITE_ONCE(t->trc_reader_checked, true);
915 
916 	// Get here if the task is in a read-side critical section.  Set
917 	// its state so that it will awaken the grace-period kthread upon
918 	// exit from that critical section.
919 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
920 	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
921 
922 reset_ipi:
923 	// Allow future IPIs to be sent on CPU and for task.
924 	// Also order this IPI handler against any later manipulations of
925 	// the intended task.
926 	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
927 	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
928 }
929 
930 /* Callback function for scheduler to check locked-down task.  */
931 static bool trc_inspect_reader(struct task_struct *t, void *arg)
932 {
933 	int cpu = task_cpu(t);
934 	bool in_qs = false;
935 	bool ofl = cpu_is_offline(cpu);
936 
937 	if (task_curr(t)) {
938 		WARN_ON_ONCE(ofl && !is_idle_task(t));
939 
940 		// If no chance of heavyweight readers, do it the hard way.
941 		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
942 			return false;
943 
944 		// If heavyweight readers are enabled on the remote task,
945 		// we can inspect its state even though it is currently running.
946 		// However, we cannot safely change its state.
947 		n_heavy_reader_attempts++;
948 		if (!ofl && // Check for "running" idle tasks on offline CPUs.
949 		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
950 			return false; // No quiescent state, do it the hard way.
951 		n_heavy_reader_updates++;
952 		if (ofl)
953 			n_heavy_reader_ofl_updates++;
954 		in_qs = true;
955 	} else {
956 		// The task is not running, so C-language access is safe.
957 		in_qs = likely(!t->trc_reader_nesting);
958 	}
959 
960 	// Mark as checked so that the grace-period kthread will
961 	// remove it from the holdout list.
962 	t->trc_reader_checked = true;
963 
964 	if (in_qs)
965 		return true;  // Already in quiescent state, done!!!
966 
967 	// The task is in a read-side critical section, so set up its
968 	// state so that it will awaken the grace-period kthread upon exit
969 	// from that critical section.
970 	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
971 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
972 	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
973 	return true;
974 }
975 
976 /* Attempt to extract the state for the specified task. */
977 static void trc_wait_for_one_reader(struct task_struct *t,
978 				    struct list_head *bhp)
979 {
980 	int cpu;
981 
982 	// If a previous IPI is still in flight, let it complete.
983 	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
984 		return;
985 
986 	// The current task had better be in a quiescent state.
987 	if (t == current) {
988 		t->trc_reader_checked = true;
989 		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
990 		return;
991 	}
992 
993 	// Attempt to nail down the task for inspection.
994 	get_task_struct(t);
995 	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
996 		put_task_struct(t);
997 		return;
998 	}
999 	put_task_struct(t);
1000 
1001 	// If this task is not yet on the holdout list, then we are in
1002 	// an RCU read-side critical section.  Otherwise, the invocation of
1003 	// rcu_add_holdout() that added it to the list did the necessary
1004 	// get_task_struct().  Either way, the task cannot be freed out
1005 	// from under this code.
1006 
1007 	// If currently running, send an IPI, either way, add to list.
1008 	trc_add_holdout(t, bhp);
1009 	if (task_curr(t) &&
1010 	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1011 		// The task is currently running, so try IPIing it.
1012 		cpu = task_cpu(t);
1013 
1014 		// If there is already an IPI outstanding, let it happen.
1015 		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1016 			return;
1017 
1018 		atomic_inc(&trc_n_readers_need_end);
1019 		per_cpu(trc_ipi_to_cpu, cpu) = true;
1020 		t->trc_ipi_to_cpu = cpu;
1021 		rcu_tasks_trace.n_ipis++;
1022 		if (smp_call_function_single(cpu,
1023 					     trc_read_check_handler, t, 0)) {
1024 			// Just in case there is some other reason for
1025 			// failure than the target CPU being offline.
1026 			rcu_tasks_trace.n_ipis_fails++;
1027 			per_cpu(trc_ipi_to_cpu, cpu) = false;
1028 			t->trc_ipi_to_cpu = -1; // Reset so that a later IPI can be attempted.
1029 			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
1030 				WARN_ON_ONCE(1);
1031 				wake_up(&trc_wait);
1032 			}
1033 		}
1034 	}
1035 }
1036 
1037 /* Initialize for a new RCU-tasks-trace grace period. */
1038 static void rcu_tasks_trace_pregp_step(void)
1039 {
1040 	int cpu;
1041 
1042 	// Allow for fast-acting IPIs.
1043 	atomic_set(&trc_n_readers_need_end, 1);
1044 
1045 	// There shouldn't be any old IPIs, but...
1046 	for_each_possible_cpu(cpu)
1047 		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1048 
1049 	// Disable CPU hotplug across the tasklist scan.
1050 	// This also waits for all readers in CPU-hotplug code paths.
1051 	cpus_read_lock();
1052 }
1053 
1054 /* Do first-round processing for the specified task. */
1055 static void rcu_tasks_trace_pertask(struct task_struct *t,
1056 				    struct list_head *hop)
1057 {
1058 	// During early boot when there is only the one boot CPU, there
1059 	// is no idle task for the other CPUs. Just return.
1060 	if (unlikely(t == NULL))
1061 		return;
1062 
1063 	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1064 	WRITE_ONCE(t->trc_reader_checked, false);
1065 	t->trc_ipi_to_cpu = -1;
1066 	trc_wait_for_one_reader(t, hop);
1067 }
1068 
1069 /*
1070  * Do intermediate processing between task and holdout scans and
1071  * pick up the idle tasks.
1072  */
1073 static void rcu_tasks_trace_postscan(struct list_head *hop)
1074 {
1075 	int cpu;
1076 
1077 	for_each_possible_cpu(cpu)
1078 		rcu_tasks_trace_pertask(idle_task(cpu), hop);
1079 
1080 	// Re-enable CPU hotplug now that the tasklist scan has completed.
1081 	cpus_read_unlock();
1082 
1083 	// Wait for late-stage exiting tasks to finish exiting.
1084 	// These might have passed the call to exit_tasks_rcu_finish().
1085 	synchronize_rcu();
1086 	// Any tasks that exit after this point will set ->trc_reader_checked.
1087 }
1088 
1089 /* Show the state of a task stalling the current RCU tasks trace GP. */
1090 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1091 {
1092 	int cpu;
1093 
1094 	if (*firstreport) {
1095 		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1096 		*firstreport = false;
1097 	}
1098 	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
1099 	cpu = task_cpu(t);
1100 	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1101 		 t->pid,
1102 		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
1103 		 ".i"[is_idle_task(t)],
1104 		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
1105 		 READ_ONCE(t->trc_reader_nesting),
1106 		 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
1107 		 cpu);
1108 	sched_show_task(t);
1109 }
1110 
1111 /* List stalled IPIs for RCU tasks trace. */
1112 static void show_stalled_ipi_trace(void)
1113 {
1114 	int cpu;
1115 
1116 	for_each_possible_cpu(cpu)
1117 		if (per_cpu(trc_ipi_to_cpu, cpu))
1118 			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1119 }
1120 
1121 /* Do one scan of the holdout list. */
1122 static void check_all_holdout_tasks_trace(struct list_head *hop,
1123 					  bool needreport, bool *firstreport)
1124 {
1125 	struct task_struct *g, *t;
1126 
1127 	// Disable CPU hotplug across the holdout list scan.
1128 	cpus_read_lock();
1129 
1130 	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1131 		// If safe and needed, try to check the current task.
1132 		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1133 		    !READ_ONCE(t->trc_reader_checked))
1134 			trc_wait_for_one_reader(t, hop);
1135 
1136 		// If check succeeded, remove this task from the list.
1137 		if (READ_ONCE(t->trc_reader_checked))
1138 			trc_del_holdout(t);
1139 		else if (needreport)
1140 			show_stalled_task_trace(t, firstreport);
1141 	}
1142 
1143 	// Re-enable CPU hotplug now that the holdout list scan has completed.
1144 	cpus_read_unlock();
1145 
1146 	if (needreport) {
1147 		if (*firstreport)
1148 			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1149 		show_stalled_ipi_trace();
1150 	}
1151 }
1152 
1153 /* Wait for grace period to complete and provide ordering. */
1154 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1155 {
1156 	bool firstreport;
1157 	struct task_struct *g, *t;
1158 	LIST_HEAD(holdouts);
1159 	long ret;
1160 
1161 	// Remove the safety count.
1162 	smp_mb__before_atomic();  // Order vs. earlier atomics
1163 	atomic_dec(&trc_n_readers_need_end);
1164 	smp_mb__after_atomic();  // Order vs. later atomics
1165 
1166 	// Wait for readers.
1167 	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
1168 	for (;;) {
1169 		ret = wait_event_idle_exclusive_timeout(
1170 				trc_wait,
1171 				atomic_read(&trc_n_readers_need_end) == 0,
1172 				READ_ONCE(rcu_task_stall_timeout));
1173 		if (ret)
1174 			break;  // Count reached zero.
1175 		// Stall warning time, so make a list of the offenders.
1176 		rcu_read_lock();
1177 		for_each_process_thread(g, t)
1178 			if (READ_ONCE(t->trc_reader_special.b.need_qs))
1179 				trc_add_holdout(t, &holdouts);
1180 		rcu_read_unlock();
1181 		firstreport = true;
1182 		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
1183 			if (READ_ONCE(t->trc_reader_special.b.need_qs))
1184 				show_stalled_task_trace(t, &firstreport);
1185 			trc_del_holdout(t); // Release task_struct reference.
1186 		}
1187 		if (firstreport)
1188 			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
1189 		show_stalled_ipi_trace();
1190 		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1191 	}
1192 	smp_mb(); // Caller's code must be ordered after wakeup.
1193 		  // Pairs with pretty much every ordering primitive.
1194 }
1195 
1196 /* Report any needed quiescent state for this exiting task. */
1197 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1198 {
1199 	WRITE_ONCE(t->trc_reader_checked, true);
1200 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1201 	WRITE_ONCE(t->trc_reader_nesting, 0);
1202 	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
1203 		rcu_read_unlock_trace_special(t, 0);
1204 }
1205 
1206 /**
1207  * call_rcu_tasks_trace() - Queue an RCU callback for a trace task-based grace period
1208  * @rhp: structure to be used for queueing the RCU updates.
1209  * @func: actual callback function to be invoked after the grace period
1210  *
1211  * The callback function will be invoked some time after a full grace
1212  * period elapses, in other words after all currently executing RCU
1213  * read-side critical sections have completed. call_rcu_tasks_trace()
1214  * assumes that the read-side critical sections end at context switch,
1215  * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
1216  * there are no read-side primitives analogous to rcu_read_lock() and
1217  * rcu_read_unlock() because this primitive is intended to determine
1218  * that all tasks have passed through a safe state, not so much for
1219  * data-structure synchronization.
1220  *
1221  * See the description of call_rcu() for more detailed information on
1222  * memory ordering guarantees.
1223  */
1224 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1225 {
1226 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1227 }
1228 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
1229 
1230 /**
1231  * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1232  *
1233  * Control will return to the caller some time after a trace rcu-tasks
1234  * grace period has elapsed, in other words after all currently executing
1235  * rcu-tasks read-side critical sections have completed.  These read-side
1236  * critical sections are delimited by calls to rcu_read_lock_trace()
1237  * and rcu_read_unlock_trace().
1238  *
1239  * This is a very specialized primitive, intended only for a few uses in
1240  * tracing and other situations requiring manipulation of function preambles
1241  * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1242  * (yet) intended for heavy use from multiple CPUs.
1243  *
1244  * See the description of synchronize_rcu() for more detailed information
1245  * on memory ordering guarantees.
1246  */
1247 void synchronize_rcu_tasks_trace(void)
1248 {
1249 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1250 	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1251 }
1252 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1253 
1254 /**
1255  * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1256  *
1257  * Although the current implementation is guaranteed to wait, it is not
1258  * obligated to, for example, if there are no pending callbacks.
1259  */
1260 void rcu_barrier_tasks_trace(void)
1261 {
1262 	/* There is only one callback queue, so this is easy.  ;-) */
1263 	synchronize_rcu_tasks_trace();
1264 }
1265 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1266 
1267 static int __init rcu_spawn_tasks_trace_kthread(void)
1268 {
1269 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1270 		rcu_tasks_trace.gp_sleep = HZ / 10;
1271 		rcu_tasks_trace.init_fract = HZ / 10;
1272 	} else {
1273 		rcu_tasks_trace.gp_sleep = HZ / 200;
1274 		if (rcu_tasks_trace.gp_sleep <= 0)
1275 			rcu_tasks_trace.gp_sleep = 1;
1276 		rcu_tasks_trace.init_fract = HZ / 200;
1277 		if (rcu_tasks_trace.init_fract <= 0)
1278 			rcu_tasks_trace.init_fract = 1;
1279 	}
1280 	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1281 	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1282 	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1283 	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1284 	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1285 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1286 	return 0;
1287 }
1288 
1289 #if !defined(CONFIG_TINY_RCU)
1290 void show_rcu_tasks_trace_gp_kthread(void)
1291 {
1292 	char buf[64];
1293 
1294 	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1295 		data_race(n_heavy_reader_ofl_updates),
1296 		data_race(n_heavy_reader_updates),
1297 		data_race(n_heavy_reader_attempts));
1298 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1299 }
1300 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1301 #endif // !defined(CONFIG_TINY_RCU)
1302 
1303 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1304 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1305 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1306 
1307 #ifndef CONFIG_TINY_RCU
1308 void show_rcu_tasks_gp_kthreads(void)
1309 {
1310 	show_rcu_tasks_classic_gp_kthread();
1311 	show_rcu_tasks_rude_gp_kthread();
1312 	show_rcu_tasks_trace_gp_kthread();
1313 }
1314 #endif /* #ifndef CONFIG_TINY_RCU */
1315 
1316 #ifdef CONFIG_PROVE_RCU
1317 struct rcu_tasks_test_desc {
1318 	struct rcu_head rh;
1319 	const char *name;
1320 	bool notrun;
1321 };
1322 
1323 static struct rcu_tasks_test_desc tests[] = {
1324 	{
1325 		.name = "call_rcu_tasks()",
1326 		/* If not defined, the test is skipped. */
1327 		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1328 	},
1329 	{
1330 		.name = "call_rcu_tasks_rude()",
1331 		/* If not defined, the test is skipped. */
1332 		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1333 	},
1334 	{
1335 		.name = "call_rcu_tasks_trace()",
1336 		/* If not defined, the test is skipped. */
1337 		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1338 	}
1339 };
1340 
1341 static void test_rcu_tasks_callback(struct rcu_head *rhp)
1342 {
1343 	struct rcu_tasks_test_desc *rttd =
1344 		container_of(rhp, struct rcu_tasks_test_desc, rh);
1345 
1346 	pr_info("Callback from %s invoked.\n", rttd->name);
1347 
1348 	rttd->notrun = true;	// Records that the callback ran; checked via !notrun below.
1349 }
1350 
1351 static void rcu_tasks_initiate_self_tests(void)
1352 {
1353 	pr_info("Running RCU-tasks wait API self tests\n");
1354 #ifdef CONFIG_TASKS_RCU
1355 	synchronize_rcu_tasks();
1356 	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1357 #endif
1358 
1359 #ifdef CONFIG_TASKS_RUDE_RCU
1360 	synchronize_rcu_tasks_rude();
1361 	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1362 #endif
1363 
1364 #ifdef CONFIG_TASKS_TRACE_RCU
1365 	synchronize_rcu_tasks_trace();
1366 	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1367 #endif
1368 }
1369 
1370 static int rcu_tasks_verify_self_tests(void)
1371 {
1372 	int ret = 0;
1373 	int i;
1374 
1375 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
1376 		if (!tests[i].notrun) {		// still hanging.
1377 			pr_err("%s has failed.\n", tests[i].name);
1378 			ret = -1;
1379 		}
1380 	}
1381 
1382 	if (ret)
1383 		WARN_ON(1);
1384 
1385 	return ret;
1386 }
1387 late_initcall(rcu_tasks_verify_self_tests);
1388 #else /* #ifdef CONFIG_PROVE_RCU */
1389 static void rcu_tasks_initiate_self_tests(void) { }
1390 #endif /* #else #ifdef CONFIG_PROVE_RCU */
1391 
1392 void __init rcu_init_tasks_generic(void)
1393 {
1394 #ifdef CONFIG_TASKS_RCU
1395 	rcu_spawn_tasks_kthread();
1396 #endif
1397 
1398 #ifdef CONFIG_TASKS_RUDE_RCU
1399 	rcu_spawn_tasks_rude_kthread();
1400 #endif
1401 
1402 #ifdef CONFIG_TASKS_TRACE_RCU
1403 	rcu_spawn_tasks_trace_kthread();
1404 #endif
1405 
1406 	// Run the self-tests.
1407 	rcu_tasks_initiate_self_tests();
1408 }
1409 
1410 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1411 static inline void rcu_tasks_bootup_oddness(void) {}
1412 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
1413