xref: /openbmc/linux/kernel/rcu/tasks.h (revision 5f8b7d4b2e9604d03ae06f1a2dd5a1f34c33e533)
1  /* SPDX-License-Identifier: GPL-2.0+ */
2  /*
3   * Task-based RCU implementations.
4   *
5   * Copyright (C) 2020 Paul E. McKenney
6   */
7  
8  #ifdef CONFIG_TASKS_RCU_GENERIC
9  #include "rcu_segcblist.h"
10  
11  ////////////////////////////////////////////////////////////////////////
12  //
13  // Generic data structures.
14  
15  struct rcu_tasks;
16  typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17  typedef void (*pregp_func_t)(struct list_head *hop);
18  typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19  typedef void (*postscan_func_t)(struct list_head *hop);
20  typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21  typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
22  
23  /**
24   * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25   * @cblist: Callback list.
26   * @lock: Lock protecting per-CPU callback list.
27   * @rtp_jiffies: Jiffies counter value for statistics.
28   * @lazy_timer: Timer to unlazify callbacks.
29   * @urgent_gp: Number of additional non-lazy grace periods.
30   * @rtp_n_lock_retries: Rough lock-contention statistic.
31   * @rtp_work: Work queue for invoking callbacks.
32   * @rtp_irq_work: IRQ work queue for deferred wakeups.
33   * @barrier_q_head: RCU callback for barrier operation.
34   * @rtp_blkd_tasks: List of tasks blocked as readers.
35   * @rtp_exit_list: List of tasks in the latter portion of do_exit().
36   * @cpu: CPU number corresponding to this entry.
37   * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
38   * @rtpp: Pointer to the rcu_tasks structure.
39   */
40  struct rcu_tasks_percpu {
41  	struct rcu_segcblist cblist;
42  	raw_spinlock_t __private lock;
43  	unsigned long rtp_jiffies;
44  	unsigned long rtp_n_lock_retries;
45  	struct timer_list lazy_timer;
46  	unsigned int urgent_gp;
47  	struct work_struct rtp_work;
48  	struct irq_work rtp_irq_work;
49  	struct rcu_head barrier_q_head;
50  	struct list_head rtp_blkd_tasks;
51  	struct list_head rtp_exit_list;
52  	int cpu;
53  	int index;
54  	struct rcu_tasks *rtpp;
55  };
56  
57  /**
58   * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
59   * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
60   * @cbs_gbl_lock: Lock protecting callback list.
61   * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
62   * @gp_func: This flavor's grace-period-wait function.
63   * @gp_state: Grace period's most recent state transition (debugging).
64   * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
65   * @init_fract: Initial backoff sleep interval.
66   * @gp_jiffies: Time of last @gp_state transition.
67   * @gp_start: Most recent grace-period start in jiffies.
68   * @tasks_gp_seq: Number of grace periods completed since boot.
69   * @n_ipis: Number of IPIs sent to encourage grace periods to end.
70   * @n_ipis_fails: Number of IPI-send failures.
71   * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
72   * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
73   * @pregp_func: This flavor's pre-grace-period function (optional).
74   * @pertask_func: This flavor's per-task scan function (optional).
75   * @postscan_func: This flavor's post-task scan function (optional).
76   * @holdouts_func: This flavor's holdout-list scan function (optional).
77   * @postgp_func: This flavor's post-grace-period function (optional).
78   * @call_func: This flavor's call_rcu()-equivalent function.
79   * @rtpcpu: This flavor's rcu_tasks_percpu structure.
80   * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
81   * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
82   * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
83   * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
84   * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
85   * @barrier_q_mutex: Serialize barrier operations.
86   * @barrier_q_count: Number of queues being waited on.
87   * @barrier_q_completion: Barrier wait/wakeup mechanism.
88   * @barrier_q_seq: Sequence number for barrier operations.
89   * @name: This flavor's textual name.
90   * @kname: This flavor's kthread name.
91   */
92  struct rcu_tasks {
93  	struct rcuwait cbs_wait;
94  	raw_spinlock_t cbs_gbl_lock;
95  	struct mutex tasks_gp_mutex;
96  	int gp_state;
97  	int gp_sleep;
98  	int init_fract;
99  	unsigned long gp_jiffies;
100  	unsigned long gp_start;
101  	unsigned long tasks_gp_seq;
102  	unsigned long n_ipis;
103  	unsigned long n_ipis_fails;
104  	struct task_struct *kthread_ptr;
105  	unsigned long lazy_jiffies;
106  	rcu_tasks_gp_func_t gp_func;
107  	pregp_func_t pregp_func;
108  	pertask_func_t pertask_func;
109  	postscan_func_t postscan_func;
110  	holdouts_func_t holdouts_func;
111  	postgp_func_t postgp_func;
112  	call_rcu_func_t call_func;
113  	struct rcu_tasks_percpu __percpu *rtpcpu;
114  	struct rcu_tasks_percpu **rtpcp_array;
115  	int percpu_enqueue_shift;
116  	int percpu_enqueue_lim;
117  	int percpu_dequeue_lim;
118  	unsigned long percpu_dequeue_gpseq;
119  	struct mutex barrier_q_mutex;
120  	atomic_t barrier_q_count;
121  	struct completion barrier_q_completion;
122  	unsigned long barrier_q_seq;
123  	char *name;
124  	char *kname;
125  };
126  
127  static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
128  
129  #define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
130  static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {			\
131  	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
132  	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
133  };											\
134  static struct rcu_tasks rt_name =							\
135  {											\
136  	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
137  	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
138  	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
139  	.gp_func = gp,									\
140  	.call_func = call,								\
141  	.rtpcpu = &rt_name ## __percpu,							\
142  	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),						\
143  	.name = n,									\
144  	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
145  	.percpu_enqueue_lim = 1,							\
146  	.percpu_dequeue_lim = 1,							\
147  	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
148  	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
149  	.kname = #rt_name,								\
150  }
151  
152  #ifdef CONFIG_TASKS_RCU
153  /* Track exiting tasks in order to allow them to be waited for. */
154  DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
155  
156  /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
157  static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
158  static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
159  #endif
160  
161  /* Avoid IPIing CPUs early in the grace period. */
162  #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
163  static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
164  module_param(rcu_task_ipi_delay, int, 0644);
165  
166  /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
167  #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
168  #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
169  static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
170  module_param(rcu_task_stall_timeout, int, 0644);
171  #define RCU_TASK_STALL_INFO (HZ * 10)
172  static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
173  module_param(rcu_task_stall_info, int, 0644);
174  static int rcu_task_stall_info_mult __read_mostly = 3;
175  module_param(rcu_task_stall_info_mult, int, 0444);
176  
177  static int rcu_task_enqueue_lim __read_mostly = -1;
178  module_param(rcu_task_enqueue_lim, int, 0444);
179  
180  static bool rcu_task_cb_adjust;
181  static int rcu_task_contend_lim __read_mostly = 100;
182  module_param(rcu_task_contend_lim, int, 0444);
183  static int rcu_task_collapse_lim __read_mostly = 10;
184  module_param(rcu_task_collapse_lim, int, 0444);
185  static int rcu_task_lazy_lim __read_mostly = 32;
186  module_param(rcu_task_lazy_lim, int, 0444);
187  
188  static int rcu_task_cpu_ids;
189  
190  /* RCU tasks grace-period state for debugging. */
191  #define RTGS_INIT		 0
192  #define RTGS_WAIT_WAIT_CBS	 1
193  #define RTGS_WAIT_GP		 2
194  #define RTGS_PRE_WAIT_GP	 3
195  #define RTGS_SCAN_TASKLIST	 4
196  #define RTGS_POST_SCAN_TASKLIST	 5
197  #define RTGS_WAIT_SCAN_HOLDOUTS	 6
198  #define RTGS_SCAN_HOLDOUTS	 7
199  #define RTGS_POST_GP		 8
200  #define RTGS_WAIT_READERS	 9
201  #define RTGS_INVOKE_CBS		10
202  #define RTGS_WAIT_CBS		11
203  #ifndef CONFIG_TINY_RCU
204  static const char * const rcu_tasks_gp_state_names[] = {
205  	"RTGS_INIT",
206  	"RTGS_WAIT_WAIT_CBS",
207  	"RTGS_WAIT_GP",
208  	"RTGS_PRE_WAIT_GP",
209  	"RTGS_SCAN_TASKLIST",
210  	"RTGS_POST_SCAN_TASKLIST",
211  	"RTGS_WAIT_SCAN_HOLDOUTS",
212  	"RTGS_SCAN_HOLDOUTS",
213  	"RTGS_POST_GP",
214  	"RTGS_WAIT_READERS",
215  	"RTGS_INVOKE_CBS",
216  	"RTGS_WAIT_CBS",
217  };
218  #endif /* #ifndef CONFIG_TINY_RCU */
219  
220  ////////////////////////////////////////////////////////////////////////
221  //
222  // Generic code.
223  
224  static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
225  
226  /* Record grace-period phase and time. */
227  static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
228  {
229  	rtp->gp_state = newstate;
230  	rtp->gp_jiffies = jiffies;
231  }
232  
233  #ifndef CONFIG_TINY_RCU
234  /* Return state name. */
235  static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
236  {
237  	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
238  	int j = READ_ONCE(i); // Prevent the compiler from reading twice
239  
240  	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
241  		return "???";
242  	return rcu_tasks_gp_state_names[j];
243  }
244  #endif /* #ifndef CONFIG_TINY_RCU */
245  
246  // Initialize per-CPU callback lists for the specified flavor of
247  // Tasks RCU.  Do not enqueue callbacks before this function is invoked.
248  static void cblist_init_generic(struct rcu_tasks *rtp)
249  {
250  	int cpu;
251  	unsigned long flags;
252  	int lim;
253  	int shift;
254  	int maxcpu;
255  	int index = 0;
256  
257  	if (rcu_task_enqueue_lim < 0) {
258  		rcu_task_enqueue_lim = 1;
259  		rcu_task_cb_adjust = true;
260  	} else if (rcu_task_enqueue_lim == 0) {
261  		rcu_task_enqueue_lim = 1;
262  	}
263  	lim = rcu_task_enqueue_lim;
264  
265  	rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
266  	BUG_ON(!rtp->rtpcp_array);
267  
268  	for_each_possible_cpu(cpu) {
269  		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
270  
271  		WARN_ON_ONCE(!rtpcp);
272  		if (cpu)
273  			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
274  		local_irq_save(flags);  // serialize initialization
275  		if (rcu_segcblist_empty(&rtpcp->cblist))
276  			rcu_segcblist_init(&rtpcp->cblist);
277  		local_irq_restore(flags);
278  		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
279  		rtpcp->cpu = cpu;
280  		rtpcp->rtpp = rtp;
281  		rtpcp->index = index;
282  		rtp->rtpcp_array[index] = rtpcp;
283  		index++;
284  		if (!rtpcp->rtp_blkd_tasks.next)
285  			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
286  		if (!rtpcp->rtp_exit_list.next)
287  			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
288  		maxcpu = cpu;
289  	}
290  
291  	rcu_task_cpu_ids = maxcpu + 1;
292  	if (lim > rcu_task_cpu_ids)
293  		lim = rcu_task_cpu_ids;
294  	shift = ilog2(rcu_task_cpu_ids / lim);
295  	if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
296  		shift++;
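	// Worked example (assumed values): with rcu_task_cpu_ids = 6 and lim = 4,
	// ilog2(6 / 4) = 0, but (6 - 1) >> 0 = 5 >= 4, so shift becomes 1 and
	// smp_processor_id() >> 1 maps CPUs 0-5 onto queues 0-2, all below lim.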
297  	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
298  	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
299  	smp_store_release(&rtp->percpu_enqueue_lim, lim);
300  
301  	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
302  			rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
303  			rcu_task_cb_adjust, rcu_task_cpu_ids);
304  }
305  
306  // Compute wakeup time for lazy callback timer.
307  static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
308  {
309  	return jiffies + rtp->lazy_jiffies;
310  }
311  
312  // Timer handler that unlazifies lazy callbacks.
313  static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
314  {
315  	unsigned long flags;
316  	bool needwake = false;
317  	struct rcu_tasks *rtp;
318  	struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
319  
320  	rtp = rtpcp->rtpp;
321  	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
322  	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
323  		if (!rtpcp->urgent_gp)
324  			rtpcp->urgent_gp = 1;
325  		needwake = true;
326  		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
327  	}
328  	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
329  	if (needwake)
330  		rcuwait_wake_up(&rtp->cbs_wait);
331  }
332  
333  // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
334  static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
335  {
336  	struct rcu_tasks *rtp;
337  	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
338  
339  	rtp = rtpcp->rtpp;
340  	rcuwait_wake_up(&rtp->cbs_wait);
341  }
342  
343  // Enqueue a callback for the specified flavor of Tasks RCU.
344  static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
345  				   struct rcu_tasks *rtp)
346  {
347  	int chosen_cpu;
348  	unsigned long flags;
349  	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
350  	int ideal_cpu;
351  	unsigned long j;
352  	bool needadjust = false;
353  	bool needwake;
354  	struct rcu_tasks_percpu *rtpcp;
355  
356  	rhp->next = NULL;
357  	rhp->func = func;
358  	local_irq_save(flags);
359  	rcu_read_lock();
360  	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
361  	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
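	// cpumask_next(ideal_cpu - 1, ...) returns ideal_cpu itself when that
	// CPU is in cpu_possible_mask, otherwise the next-higher possible CPU.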
362  	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
363  	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
364  		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
365  		j = jiffies;
366  		if (rtpcp->rtp_jiffies != j) {
367  			rtpcp->rtp_jiffies = j;
368  			rtpcp->rtp_n_lock_retries = 0;
369  		}
370  		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
371  		    READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
372  			needadjust = true;  // Defer adjustment to avoid deadlock.
373  	}
374  	// Queuing callbacks before initialization not yet supported.
375  	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
376  		rcu_segcblist_init(&rtpcp->cblist);
377  	needwake = (func == wakeme_after_rcu) ||
378  		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
379  	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
380  		if (rtp->lazy_jiffies)
381  			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
382  		else
383  			needwake = rcu_segcblist_empty(&rtpcp->cblist);
384  	}
385  	if (needwake)
386  		rtpcp->urgent_gp = 3;
387  	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
388  	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
389  	if (unlikely(needadjust)) {
390  		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
391  		if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
392  			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
393  			WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
394  			smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
395  			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
396  		}
397  		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
398  	}
399  	rcu_read_unlock();
400  	/* We can't create the thread unless interrupts are enabled. */
401  	if (needwake && READ_ONCE(rtp->kthread_ptr))
402  		irq_work_queue(&rtpcp->rtp_irq_work);
403  }
404  
405  // RCU callback function for rcu_barrier_tasks_generic().
406  static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
407  {
408  	struct rcu_tasks *rtp;
409  	struct rcu_tasks_percpu *rtpcp;
410  
411  	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
412  	rtp = rtpcp->rtpp;
413  	if (atomic_dec_and_test(&rtp->barrier_q_count))
414  		complete(&rtp->barrier_q_completion);
415  }
416  
417  // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
418  // Operates in a manner similar to rcu_barrier().
419  static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
420  {
421  	int cpu;
422  	unsigned long flags;
423  	struct rcu_tasks_percpu *rtpcp;
424  	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
425  
426  	mutex_lock(&rtp->barrier_q_mutex);
427  	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
428  		smp_mb();
429  		mutex_unlock(&rtp->barrier_q_mutex);
430  		return;
431  	}
432  	rcu_seq_start(&rtp->barrier_q_seq);
433  	init_completion(&rtp->barrier_q_completion);
434  	atomic_set(&rtp->barrier_q_count, 2);
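	// The initial count of 2 acts as a bias so that the completion cannot
	// fire while the entrain loop below is still running; the bias is
	// dropped by the atomic_sub_and_test(2, ...) after the loop.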
435  	for_each_possible_cpu(cpu) {
436  		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
437  			break;
438  		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
439  		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
440  		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
441  		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
442  			atomic_inc(&rtp->barrier_q_count);
443  		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
444  	}
445  	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
446  		complete(&rtp->barrier_q_completion);
447  	wait_for_completion(&rtp->barrier_q_completion);
448  	rcu_seq_end(&rtp->barrier_q_seq);
449  	mutex_unlock(&rtp->barrier_q_mutex);
450  }
451  
452  // Advance callbacks and indicate whether either a grace period or
453  // callback invocation is needed.
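// The return value is a bitmask: 0x1 indicates callbacks ready to invoke,
// 0x2 indicates callbacks still waiting for a grace period.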
454  static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
455  {
456  	int cpu;
457  	int dequeue_limit;
458  	unsigned long flags;
459  	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
460  	long n;
461  	long ncbs = 0;
462  	long ncbsnz = 0;
463  	int needgpcb = 0;
464  
465  	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
466  	for (cpu = 0; cpu < dequeue_limit; cpu++) {
467  		if (!cpu_possible(cpu))
468  			continue;
469  		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
470  
471  		/* Advance and accelerate any new callbacks. */
472  		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
473  			continue;
474  		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
475  		// Should we shrink down to a single callback queue?
476  		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
477  		if (n) {
478  			ncbs += n;
479  			if (cpu > 0)
480  				ncbsnz += n;
481  		}
482  		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
483  		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
484  		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
485  			if (rtp->lazy_jiffies)
486  				rtpcp->urgent_gp--;
487  			needgpcb |= 0x3;
488  		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
489  			rtpcp->urgent_gp = 0;
490  		}
491  		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
492  			needgpcb |= 0x1;
493  		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
494  	}
495  
496  	// Shrink down to a single callback queue if appropriate.
497  	// This is done in two stages: (1) If there are no more than
498  	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
499  	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
500  	// if there has not been an increase in callbacks, limit dequeuing
501  	// to CPU 0.  Note the matching RCU read-side critical section in
502  	// call_rcu_tasks_generic().
503  	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
504  		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
505  		if (rtp->percpu_enqueue_lim > 1) {
506  			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
507  			smp_store_release(&rtp->percpu_enqueue_lim, 1);
508  			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
509  			gpdone = false;
510  			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
511  		}
512  		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
513  	}
514  	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
515  		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
516  		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
517  			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
518  			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
519  		}
520  		if (rtp->percpu_dequeue_lim == 1) {
521  			for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
522  				if (!cpu_possible(cpu))
523  					continue;
524  				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
525  
526  				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
527  			}
528  		}
529  		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
530  	}
531  
532  	return needgpcb;
533  }
534  
535  // Advance callbacks and invoke any that are ready.
536  static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
537  {
538  	int cpuwq;
539  	unsigned long flags;
540  	int len;
541  	int index;
542  	struct rcu_head *rhp;
543  	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
544  	struct rcu_tasks_percpu *rtpcp_next;
545  
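	// Fan out callback invocation as a binary tree: this entry kicks off
	// workqueue handlers for entries 2 * index + 1 and 2 * index + 2, each
	// of which fans out further via rcu_tasks_invoke_cbs_wq().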
546  	index = rtpcp->index * 2 + 1;
547  	if (index < num_possible_cpus()) {
548  		rtpcp_next = rtp->rtpcp_array[index];
549  		if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
550  			cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
551  			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
552  			index++;
553  			if (index < num_possible_cpus()) {
554  				rtpcp_next = rtp->rtpcp_array[index];
555  				if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
556  					cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
557  					queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
558  				}
559  			}
560  		}
561  	}
562  
563  	if (rcu_segcblist_empty(&rtpcp->cblist))
564  		return;
565  	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
566  	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
567  	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
568  	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
569  	len = rcl.len;
570  	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
571  		debug_rcu_head_callback(rhp);
572  		local_bh_disable();
573  		rhp->func(rhp);
574  		local_bh_enable();
575  		cond_resched();
576  	}
577  	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
578  	rcu_segcblist_add_len(&rtpcp->cblist, -len);
579  	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
580  	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
581  }
582  
583  // Workqueue flood to advance callbacks and invoke any that are ready.
584  static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
585  {
586  	struct rcu_tasks *rtp;
587  	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
588  
589  	rtp = rtpcp->rtpp;
590  	rcu_tasks_invoke_cbs(rtp, rtpcp);
591  }
592  
593  // Wait for one grace period.
594  static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
595  {
596  	int needgpcb;
597  
598  	mutex_lock(&rtp->tasks_gp_mutex);
599  
600  	// If there were none, wait a bit and start over.
601  	if (unlikely(midboot)) {
602  		needgpcb = 0x2;
603  	} else {
604  		mutex_unlock(&rtp->tasks_gp_mutex);
605  		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
606  		rcuwait_wait_event(&rtp->cbs_wait,
607  				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
608  				   TASK_IDLE);
609  		mutex_lock(&rtp->tasks_gp_mutex);
610  	}
611  
612  	if (needgpcb & 0x2) {
613  		// Wait for one grace period.
614  		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
615  		rtp->gp_start = jiffies;
616  		rcu_seq_start(&rtp->tasks_gp_seq);
617  		rtp->gp_func(rtp);
618  		rcu_seq_end(&rtp->tasks_gp_seq);
619  	}
620  
621  	// Invoke callbacks.
622  	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
623  	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
624  	mutex_unlock(&rtp->tasks_gp_mutex);
625  }
626  
627  // RCU-tasks kthread that detects grace periods and invokes callbacks.
628  static int __noreturn rcu_tasks_kthread(void *arg)
629  {
630  	int cpu;
631  	struct rcu_tasks *rtp = arg;
632  
633  	for_each_possible_cpu(cpu) {
634  		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
635  
636  		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
637  		rtpcp->urgent_gp = 1;
638  	}
639  
640  	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
641  	housekeeping_affine(current, HK_TYPE_RCU);
642  	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
643  
644  	/*
645  	 * Each pass through the following loop makes one check for
646  	 * newly arrived callbacks, and, if there are some, waits for
647  	 * one RCU-tasks grace period and then invokes the callbacks.
648  	 * This loop is terminated by the system going down.  ;-)
649  	 */
650  	for (;;) {
651  		// Wait for one grace period and invoke any callbacks
652  		// that are ready.
653  		rcu_tasks_one_gp(rtp, false);
654  
655  		// Paranoid sleep to keep this from entering a tight loop.
656  		schedule_timeout_idle(rtp->gp_sleep);
657  	}
658  }
659  
660  // Wait for a grace period for the specified flavor of Tasks RCU.
661  static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
662  {
663  	/* Complain if the scheduler has not started.  */
664  	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
665  			 "synchronize_%s() called too soon", rtp->name))
666  		return;
667  
668  	// If the grace-period kthread is running, use it.
669  	if (READ_ONCE(rtp->kthread_ptr)) {
670  		wait_rcu_gp(rtp->call_func);
671  		return;
672  	}
673  	rcu_tasks_one_gp(rtp, true);
674  }
675  
676  /* Spawn RCU-tasks grace-period kthread. */
677  static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
678  {
679  	struct task_struct *t;
680  
681  	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
682  	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
683  		return;
684  	smp_mb(); /* Ensure others see full kthread. */
685  }
686  
687  #ifndef CONFIG_TINY_RCU
688  
689  /*
690   * Print any non-default Tasks RCU settings.
691   */
692  static void __init rcu_tasks_bootup_oddness(void)
693  {
694  #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
695  	int rtsimc;
696  
697  	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
698  		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
699  	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
700  	if (rtsimc != rcu_task_stall_info_mult) {
701  		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
702  		rcu_task_stall_info_mult = rtsimc;
703  	}
704  #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
705  #ifdef CONFIG_TASKS_RCU
706  	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
707  #endif /* #ifdef CONFIG_TASKS_RCU */
708  #ifdef CONFIG_TASKS_RUDE_RCU
709  	pr_info("\tRude variant of Tasks RCU enabled.\n");
710  #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
711  #ifdef CONFIG_TASKS_TRACE_RCU
712  	pr_info("\tTracing variant of Tasks RCU enabled.\n");
713  #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
714  }
715  
716  #endif /* #ifndef CONFIG_TINY_RCU */
717  
718  #ifndef CONFIG_TINY_RCU
719  /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
720  static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
721  {
722  	int cpu;
723  	bool havecbs = false;
724  	bool haveurgent = false;
725  	bool haveurgentcbs = false;
726  
727  	for_each_possible_cpu(cpu) {
728  		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
729  
730  		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
731  			havecbs = true;
732  		if (data_race(rtpcp->urgent_gp))
733  			haveurgent = true;
734  		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
735  			haveurgentcbs = true;
736  		if (havecbs && haveurgent && haveurgentcbs)
737  			break;
738  	}
739  	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
740  		rtp->kname,
741  		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
742  		jiffies - data_race(rtp->gp_jiffies),
743  		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
744  		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
745  		".k"[!!data_race(rtp->kthread_ptr)],
746  		".C"[havecbs],
747  		".u"[haveurgent],
748  		".U"[haveurgentcbs],
749  		rtp->lazy_jiffies,
750  		s);
751  }
752  #endif // #ifndef CONFIG_TINY_RCU
753  
754  static void exit_tasks_rcu_finish_trace(struct task_struct *t);
755  
756  #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
757  
758  ////////////////////////////////////////////////////////////////////////
759  //
760  // Shared code between task-list-scanning variants of Tasks RCU.
761  
762  /* Wait for one RCU-tasks grace period. */
763  static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
764  {
765  	struct task_struct *g;
766  	int fract;
767  	LIST_HEAD(holdouts);
768  	unsigned long j;
769  	unsigned long lastinfo;
770  	unsigned long lastreport;
771  	bool reported = false;
772  	int rtsi;
773  	struct task_struct *t;
774  
775  	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
776  	rtp->pregp_func(&holdouts);
777  
778  	/*
779  	 * There were callbacks, so we need to wait for an RCU-tasks
780  	 * grace period.  Start off by scanning the task list for tasks
781  	 * that are not already voluntarily blocked.  Mark these tasks
782  	 * and make a list of them in holdouts.
783  	 */
784  	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
785  	if (rtp->pertask_func) {
786  		rcu_read_lock();
787  		for_each_process_thread(g, t)
788  			rtp->pertask_func(t, &holdouts);
789  		rcu_read_unlock();
790  	}
791  
792  	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
793  	rtp->postscan_func(&holdouts);
794  
795  	/*
796  	 * Each pass through the following loop scans the list of holdout
797  	 * tasks, removing any that are no longer holdouts.  When the list
798  	 * is empty, we are done.
799  	 */
800  	lastreport = jiffies;
801  	lastinfo = lastreport;
802  	rtsi = READ_ONCE(rcu_task_stall_info);
803  
804  	// Start off with initial wait and slowly back off to 1 HZ wait.
805  	fract = rtp->init_fract;
806  
807  	while (!list_empty(&holdouts)) {
808  		ktime_t exp;
809  		bool firstreport;
810  		bool needreport;
811  		int rtst;
812  
813  		// Slowly back off waiting for holdouts
814  		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
815  		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
816  			schedule_timeout_idle(fract);
817  		} else {
818  			exp = jiffies_to_nsecs(fract);
819  			__set_current_state(TASK_IDLE);
820  			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
821  		}
822  
823  		if (fract < HZ)
824  			fract++;
825  
826  		rtst = READ_ONCE(rcu_task_stall_timeout);
827  		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
828  		if (needreport) {
829  			lastreport = jiffies;
830  			reported = true;
831  		}
832  		firstreport = true;
833  		WARN_ON(signal_pending(current));
834  		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
835  		rtp->holdouts_func(&holdouts, needreport, &firstreport);
836  
837  		// Print pre-stall informational messages if needed.
838  		j = jiffies;
839  		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
840  			lastinfo = j;
841  			rtsi = rtsi * rcu_task_stall_info_mult;
842  			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
843  				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
844  		}
845  	}
846  
847  	set_tasks_gp_state(rtp, RTGS_POST_GP);
848  	rtp->postgp_func(rtp);
849  }
850  
851  #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
852  
853  #ifdef CONFIG_TASKS_RCU
854  
855  ////////////////////////////////////////////////////////////////////////
856  //
857  // Simple variant of RCU whose quiescent states are voluntary context
858  // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
859  // As such, grace periods can take one good long time.  There are no
860  // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
861  // because this implementation is intended to get the system into a safe
862  // state for some of the manipulations involved in tracing and the like.
863  // Finally, this implementation does not support high call_rcu_tasks()
864  // rates from multiple CPUs.  If this is required, per-CPU callback lists
865  // will be needed.
866  //
867  // The implementation uses rcu_tasks_wait_gp(), which relies on function
868  // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
869  // function sets these function pointers up so that rcu_tasks_wait_gp()
870  // invokes these functions in this order:
871  //
872  // rcu_tasks_pregp_step():
873  //	Invokes synchronize_rcu() in order to wait for all in-flight
874  //	t->on_rq and t->nvcsw transitions to complete.	This works because
875  //	all such transitions are carried out with interrupts disabled.
876  // rcu_tasks_pertask(), invoked on every non-idle task:
877  //	For every runnable non-idle task other than the current one, use
878  //	get_task_struct() to pin down that task, snapshot that task's
879  //	number of voluntary context switches, and add that task to the
880  //	holdout list.
881  // rcu_tasks_postscan():
882  //	Invoke synchronize_srcu() to ensure that all tasks that were
883  //	in the process of exiting (and which thus might not know to
884  //	synchronize with this RCU Tasks grace period) have completed
885  //	exiting.
886  // check_all_holdout_tasks(), repeatedly until holdout list is empty:
887  //	Scans the holdout list, attempting to identify a quiescent state
888  //	for each task on the list.  If there is a quiescent state, the
889  //	corresponding task is removed from the holdout list.
890  // rcu_tasks_postgp():
891  //	Invokes synchronize_rcu() in order to ensure that all prior
892  //	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
893  //	to have happened before the end of this RCU Tasks grace period.
894  //	Again, this works because all such transitions are carried out
895  //	with interrupts disabled.
896  //
897  // For each exiting task, the exit_tasks_rcu_start() and
898  // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
899  // read-side critical sections waited for by rcu_tasks_postscan().
900  //
901  // Pre-grace-period update-side code is ordered before the grace
902  // via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
903  // is ordered before the grace period via synchronize_rcu() call in
904  // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
905  // disabling.
906  
907  /* Pre-grace-period preparation. */
908  static void rcu_tasks_pregp_step(struct list_head *hop)
909  {
910  	/*
911  	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
912  	 * to complete.  Invoking synchronize_rcu() suffices because all
913  	 * these transitions occur with interrupts disabled.  Without this
914  	 * synchronize_rcu(), a read-side critical section that started
915  	 * before the grace period might be incorrectly seen as having
916  	 * started after the grace period.
917  	 *
918  	 * This synchronize_rcu() also dispenses with the need for a
919  	 * memory barrier on the first store to t->rcu_tasks_holdout,
920  	 * as it forces the store to happen after the beginning of the
921  	 * grace period.
922  	 */
923  	synchronize_rcu();
924  }
925  
926  /* Check for quiescent states since the pregp's synchronize_rcu() */
927  static bool rcu_tasks_is_holdout(struct task_struct *t)
928  {
929  	int cpu;
930  
931  	/* Has the task been seen voluntarily sleeping? */
932  	if (!READ_ONCE(t->on_rq))
933  		return false;
934  
935  	/*
936  	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
937  	 * quiescent states. But CPU boot code performed by the idle task
938  	 * isn't a quiescent state.
939  	 */
940  	if (is_idle_task(t))
941  		return false;
942  
943  	cpu = task_cpu(t);
944  
945  	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
946  	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
947  		return false;
948  
949  	return true;
950  }
951  
952  /* Per-task initial processing. */
953  static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
954  {
955  	if (t != current && rcu_tasks_is_holdout(t)) {
956  		get_task_struct(t);
957  		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
958  		WRITE_ONCE(t->rcu_tasks_holdout, true);
959  		list_add(&t->rcu_tasks_holdout_list, hop);
960  	}
961  }
962  
963  /* Processing between scanning the tasklist and draining the holdout list. */
964  static void rcu_tasks_postscan(struct list_head *hop)
965  {
966  	int rtsi = READ_ONCE(rcu_task_stall_info);
967  
968  	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
969  		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
970  		add_timer(&tasks_rcu_exit_srcu_stall_timer);
971  	}
972  
973  	/*
974  	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
975  	 * until their final schedule() with TASK_DEAD state. To cope with
976  	 * this, divide the fragile part of the exit path into two intersecting
977  	 * read-side critical sections:
978  	 *
979  	 * 1) An _SRCU_ read side starting before calling exit_notify(),
980  	 *    which may remove the task from the tasklist, and ending after
981  	 *    the final preempt_disable() call in do_exit().
982  	 *
983  	 * 2) An _RCU_ read side starting with the final preempt_disable()
984  	 *    call in do_exit() and ending with the final call to schedule()
985  	 *    with TASK_DEAD state.
986  	 *
987  	 * This handles the part 1). And postgp will handle part 2) with a
988  	 * call to synchronize_rcu().
989  	 */
990  	synchronize_srcu(&tasks_rcu_exit_srcu);
991  
992  	if (!IS_ENABLED(CONFIG_TINY_RCU))
993  		del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
994  }
995  
996  /* See if tasks are still holding out, complain if so. */
997  static void check_holdout_task(struct task_struct *t,
998  			       bool needreport, bool *firstreport)
999  {
1000  	int cpu;
1001  
1002  	if (!READ_ONCE(t->rcu_tasks_holdout) ||
1003  	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
1004  	    !rcu_tasks_is_holdout(t) ||
1005  	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
1006  	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
1007  		WRITE_ONCE(t->rcu_tasks_holdout, false);
1008  		list_del_init(&t->rcu_tasks_holdout_list);
1009  		put_task_struct(t);
1010  		return;
1011  	}
1012  	rcu_request_urgent_qs_task(t);
1013  	if (!needreport)
1014  		return;
1015  	if (*firstreport) {
1016  		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
1017  		*firstreport = false;
1018  	}
1019  	cpu = task_cpu(t);
1020  	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
1021  		 t, ".I"[is_idle_task(t)],
1022  		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
1023  		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
1024  		 t->rcu_tasks_idle_cpu, cpu);
1025  	sched_show_task(t);
1026  }
1027  
1028  /* Scan the holdout lists for tasks no longer holding out. */
1029  static void check_all_holdout_tasks(struct list_head *hop,
1030  				    bool needreport, bool *firstreport)
1031  {
1032  	struct task_struct *t, *t1;
1033  
1034  	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1035  		check_holdout_task(t, needreport, firstreport);
1036  		cond_resched();
1037  	}
1038  }
1039  
1040  /* Finish off the Tasks-RCU grace period. */
1041  static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1042  {
1043  	/*
1044  	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
1045  	 * memory barriers prior to them in the schedule() path, memory
1046  	 * reordering on other CPUs could cause their RCU-tasks read-side
1047  	 * critical sections to extend past the end of the grace period.
1048  	 * However, because these ->nvcsw updates are carried out with
1049  	 * interrupts disabled, we can use synchronize_rcu() to force the
1050  	 * needed ordering on all such CPUs.
1051  	 *
1052  	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1053  	 * accesses to be within the grace period, avoiding the need for
1054  	 * memory barriers for ->rcu_tasks_holdout accesses.
1055  	 *
1056  	 * In addition, this synchronize_rcu() waits for exiting tasks
1057  	 * to complete their final preempt_disable() region of execution,
1058  	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
1059  	 * enforcing the whole region before tasklist removal until
1060  	 * the final schedule() with TASK_DEAD state to be an RCU TASKS
1061  	 * read side critical section.
1062  	 */
1063  	synchronize_rcu();
1064  }
1065  
1066  void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
1067  DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
1068  
1069  static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1070  {
1071  #ifndef CONFIG_TINY_RCU
1072  	int rtsi;
1073  
1074  	rtsi = READ_ONCE(rcu_task_stall_info);
1075  	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1076  		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1077  		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1078  	pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1079  	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1080  	add_timer(&tasks_rcu_exit_srcu_stall_timer);
1081  #endif // #ifndef CONFIG_TINY_RCU
1082  }
1083  
1084  /**
1085   * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1086   * @rhp: structure to be used for queueing the RCU updates.
1087   * @func: actual callback function to be invoked after the grace period
1088   *
1089   * The callback function will be invoked some time after a full grace
1090   * period elapses, in other words after all currently executing RCU
1091   * read-side critical sections have completed. call_rcu_tasks() assumes
1092   * that the read-side critical sections end at a voluntary context
1093   * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
1094   * or transition to usermode execution.  As such, there are no read-side
1095   * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1096   * this primitive is intended to determine that all tasks have passed
1097   * through a safe state, not so much for data-structure synchronization.
1098   *
1099   * See the description of call_rcu() for more detailed information on
1100   * memory ordering guarantees.
1101   */
1102  void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1103  {
1104  	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1105  }
1106  EXPORT_SYMBOL_GPL(call_rcu_tasks);
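/*
 * Minimal usage sketch (the struct old_probe type and the free_old_probe()
 * helper are hypothetical, for illustration only):
 *
 *	struct old_probe {
 *		void *payload;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_old_probe(struct rcu_head *rhp)
 *	{
 *		struct old_probe *op = container_of(rhp, struct old_probe, rcu);
 *
 *		kfree(op->payload);
 *		kfree(op);
 *	}
 *
 *	// After unpublishing op so that no new tasks can reach it:
 *	call_rcu_tasks(&op->rcu, free_old_probe);
 */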
1107  
1108  /**
1109   * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1110   *
1111   * Control will return to the caller some time after a full rcu-tasks
1112   * grace period has elapsed, in other words after all currently
1113   * executing rcu-tasks read-side critical sections have completed.  These
1114   * read-side critical sections are delimited by calls to schedule(),
1115   * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1116   * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1117   *
1118   * This is a very specialized primitive, intended only for a few uses in
1119   * tracing and other situations requiring manipulation of function
1120   * preambles and profiling hooks.  The synchronize_rcu_tasks() function
1121   * is not (yet) intended for heavy use from multiple CPUs.
1122   *
1123   * See the description of synchronize_rcu() for more detailed information
1124   * on memory ordering guarantees.
1125   */
1126  void synchronize_rcu_tasks(void)
1127  {
1128  	synchronize_rcu_tasks_generic(&rcu_tasks);
1129  }
1130  EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
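/*
 * Minimal usage sketch for a trampoline-style update (the
 * unpublish_my_trampoline() helper and my_trampoline pointer are
 * hypothetical):
 *
 *	unpublish_my_trampoline();	// No new tasks can enter it.
 *	synchronize_rcu_tasks();	// Wait out tasks already inside it.
 *	kfree(my_trampoline);		// Now safe to free the trampoline.
 */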
1131  
1132  /**
1133   * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1134   *
1135   * Although the current implementation is guaranteed to wait, it is not
1136   * obligated to, for example, if there are no pending callbacks.
1137   */
1138  void rcu_barrier_tasks(void)
1139  {
1140  	rcu_barrier_tasks_generic(&rcu_tasks);
1141  }
1142  EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
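/*
 * Typical use is on teardown paths: after the last call_rcu_tasks() has
 * been issued, rcu_barrier_tasks() guarantees that every previously queued
 * callback has been invoked before the callback code or data is freed, for
 * example before a module is unloaded.
 */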
1143  
1144  int rcu_tasks_lazy_ms = -1;
1145  module_param(rcu_tasks_lazy_ms, int, 0444);
1146  
1147  static int __init rcu_spawn_tasks_kthread(void)
1148  {
1149  	cblist_init_generic(&rcu_tasks);
1150  	rcu_tasks.gp_sleep = HZ / 10;
1151  	rcu_tasks.init_fract = HZ / 10;
1152  	if (rcu_tasks_lazy_ms >= 0)
1153  		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1154  	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1155  	rcu_tasks.pertask_func = rcu_tasks_pertask;
1156  	rcu_tasks.postscan_func = rcu_tasks_postscan;
1157  	rcu_tasks.holdouts_func = check_all_holdout_tasks;
1158  	rcu_tasks.postgp_func = rcu_tasks_postgp;
1159  	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1160  	return 0;
1161  }
1162  
1163  #if !defined(CONFIG_TINY_RCU)
1164  void show_rcu_tasks_classic_gp_kthread(void)
1165  {
1166  	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1167  }
1168  EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1169  #endif // !defined(CONFIG_TINY_RCU)
1170  
1171  struct task_struct *get_rcu_tasks_gp_kthread(void)
1172  {
1173  	return rcu_tasks.kthread_ptr;
1174  }
1175  EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1176  
1177  /*
1178   * Contribute to protect against tasklist scan blind spot while the
1179   * task is exiting and may be removed from the tasklist. See
1180   * corresponding synchronize_srcu() for further details.
1181   */
1182  void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
1183  {
1184  	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
1185  }
1186  
1187  /*
1188   * Contribute to protect against tasklist scan blind spot while the
1189   * task is exiting and may be removed from the tasklist. See
1190   * corresponding synchronize_srcu() for further details.
1191   */
1192  void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
1193  {
1194  	struct task_struct *t = current;
1195  
1196  	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
1197  }
1198  
1199  /*
1200   * Contribute to protect against tasklist scan blind spot while the
1201   * task is exiting and may be removed from the tasklist. See
1202   * corresponding synchronize_srcu() for further details.
1203   */
1204  void exit_tasks_rcu_finish(void)
1205  {
1206  	exit_tasks_rcu_stop();
1207  	exit_tasks_rcu_finish_trace(current);
1208  }
1209  
1210  #else /* #ifdef CONFIG_TASKS_RCU */
1211  void exit_tasks_rcu_start(void) { }
1212  void exit_tasks_rcu_stop(void) { }
1213  void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1214  #endif /* #else #ifdef CONFIG_TASKS_RCU */
1215  
1216  #ifdef CONFIG_TASKS_RUDE_RCU
1217  
1218  ////////////////////////////////////////////////////////////////////////
1219  //
1220  // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1221  // passing an empty function to schedule_on_each_cpu().  This approach
1222  // provides an asynchronous call_rcu_tasks_rude() API and batching of
1223  // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1224  // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1225  // and induces otherwise unnecessary context switches on all online CPUs,
1226  // whether idle or not.
1227  //
1228  // Callback handling is provided by the rcu_tasks_kthread() function.
1229  //
1230  // Ordering is provided by the scheduler's context-switch code.
1231  
1232  // Empty function to allow workqueues to force a context switch.
1233  static void rcu_tasks_be_rude(struct work_struct *work)
1234  {
1235  }
1236  
1237  // Wait for one rude RCU-tasks grace period.
1238  static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1239  {
1240  	rtp->n_ipis += cpumask_weight(cpu_online_mask);
1241  	schedule_on_each_cpu(rcu_tasks_be_rude);
1242  }
1243  
1244  void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1245  DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1246  		 "RCU Tasks Rude");
1247  
1248  /**
1249   * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1250   * @rhp: structure to be used for queueing the RCU updates.
1251   * @func: actual callback function to be invoked after the grace period
1252   *
1253   * The callback function will be invoked some time after a full grace
1254   * period elapses, in other words after all currently executing RCU
1255   * read-side critical sections have completed. call_rcu_tasks_rude()
1256   * assumes that the read-side critical sections end at context switch,
1257   * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1258   * usermode execution is schedulable). As such, there are no read-side
1259   * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1260   * this primitive is intended to determine that all tasks have passed
1261   * through a safe state, not so much for data-structure synchronization.
1262   *
1263   * See the description of call_rcu() for more detailed information on
1264   * memory ordering guarantees.
1265   */
1266  void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1267  {
1268  	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1269  }
1270  EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1271  
1272  /**
1273   * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1274   *
1275   * Control will return to the caller some time after a rude rcu-tasks
1276   * grace period has elapsed, in other words after all currently
1277   * executing rcu-tasks read-side critical sections have completed.  These
1278   * read-side critical sections are delimited by calls to schedule(),
1279   * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1280   * context), and (in theory, anyway) cond_resched().
1281   *
1282   * This is a very specialized primitive, intended only for a few uses in
1283   * tracing and other situations requiring manipulation of function preambles
1284   * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
1285   * (yet) intended for heavy use from multiple CPUs.
1286   *
1287   * See the description of synchronize_rcu() for more detailed information
1288   * on memory ordering guarantees.
1289   */
1290  void synchronize_rcu_tasks_rude(void)
1291  {
1292  	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1293  }
1294  EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1295  
1296  /**
1297   * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1298   *
1299   * Although the current implementation is guaranteed to wait, it is not
1300   * obligated to, for example, if there are no pending callbacks.
1301   */
1302  void rcu_barrier_tasks_rude(void)
1303  {
1304  	rcu_barrier_tasks_generic(&rcu_tasks_rude);
1305  }
1306  EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1307  
1308  int rcu_tasks_rude_lazy_ms = -1;
1309  module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1310  
1311  static int __init rcu_spawn_tasks_rude_kthread(void)
1312  {
1313  	cblist_init_generic(&rcu_tasks_rude);
1314  	rcu_tasks_rude.gp_sleep = HZ / 10;
1315  	if (rcu_tasks_rude_lazy_ms >= 0)
1316  		rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
1317  	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1318  	return 0;
1319  }
1320  
1321  #if !defined(CONFIG_TINY_RCU)
1322  void show_rcu_tasks_rude_gp_kthread(void)
1323  {
1324  	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1325  }
1326  EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1327  #endif // !defined(CONFIG_TINY_RCU)
1328  
1329  struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1330  {
1331  	return rcu_tasks_rude.kthread_ptr;
1332  }
1333  EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1334  
1335  #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1336  
1337  ////////////////////////////////////////////////////////////////////////
1338  //
1339  // Tracing variant of Tasks RCU.  This variant is designed to be used
1340  // to protect tracing hooks, including those of BPF.  This variant
1341  // therefore:
1342  //
1343  // 1.	Has explicit read-side markers to allow finite grace periods
1344  //	in the face of in-kernel loops for PREEMPT=n builds.
1345  //
1346  // 2.	Protects code in the idle loop, exception entry/exit, and
1347  //	CPU-hotplug code paths, similar to the capabilities of SRCU.
1348  //
1349  // 3.	Avoids expensive read-side instructions, having overhead similar
1350  //	to that of Preemptible RCU.
1351  //
1352  // There are of course downsides.  For example, the grace-period code
1353  // can send IPIs to CPUs, even when those CPUs are in the idle loop or
1354  // in nohz_full userspace.  If needed, these downsides can be at least
1355  // partially remedied.
1356  //
1357  // Perhaps most important, this variant of RCU does not affect the vanilla
1358  // flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
1359  // readers can operate from idle, offline, and exception entry/exit in no
1360  // way allows rcu_preempt and rcu_sched readers to also do so.
1361  //
1362  // The implementation uses rcu_tasks_wait_gp(), which relies on function
1363  // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
1364  // function sets these function pointers up so that rcu_tasks_wait_gp()
1365  // invokes these functions in this order:
1366  //
1367  // rcu_tasks_trace_pregp_step():
1368  //	Disables CPU hotplug, adds all currently executing tasks to the
1369  //	holdout list, then checks the state of all tasks that blocked
1370  //	or were preempted within their current RCU Tasks Trace read-side
1371  //	critical section, adding them to the holdout list if appropriate.
1372  //	Finally, this function re-enables CPU hotplug.
1373  // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1374  // rcu_tasks_trace_postscan():
1375  //	Invokes synchronize_rcu() to wait for late-stage exiting tasks
1376  //	to finish exiting.
1377  // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1378  //	Scans the holdout list, attempting to identify a quiescent state
1379  //	for each task on the list.  If there is a quiescent state, the
1380  //	corresponding task is removed from the holdout list.  Once this
1381  //	list is empty, the grace period has completed.
1382  // rcu_tasks_trace_postgp():
1383  //	Provides the needed full memory barrier and does debug checks.
1384  //
1385  // The exit_tasks_rcu_finish_trace() function synchronizes with exiting tasks.
1386  //
1387  // Pre-grace-period update-side code is ordered before the grace period
1388  // via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
1389  // read-side code is ordered before the grace period by atomic operations
1390  // on the .b.need_qs flag of each task involved in this process, or by scheduler
1391  // context-switch ordering (for locked-down non-running readers).
1392  
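// Editorial sketch (not part of the original file): the read/update
// pattern that the machinery above supports, using hypothetical names.
// Readers delimit their critical sections explicitly, and the updater
// waits for all such sections that might still reference the old data:
//
//	static struct my_hook __rcu *active_hook;
//
//	static void reader(void)
//	{
//		struct my_hook *hp;
//
//		rcu_read_lock_trace();
//		hp = rcu_dereference_check(active_hook,
//					   rcu_read_lock_trace_held());
//		if (hp)
//			hp->func();
//		rcu_read_unlock_trace();
//	}
//
//	static void retire_hook(struct my_hook *old)
//	{
//		rcu_assign_pointer(active_hook, NULL);
//		synchronize_rcu_tasks_trace();  // Wait out all readers.
//		kfree(old);
//	}
//
// struct my_hook, active_hook, reader(), and retire_hook() are
// illustrative only; BPF and ftrace use analogous but more involved
// patterns.
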
1393  // The lockdep state must be outside of #ifdef to be useful.
1394  #ifdef CONFIG_DEBUG_LOCK_ALLOC
1395  static struct lock_class_key rcu_lock_trace_key;
1396  struct lockdep_map rcu_trace_lock_map =
1397  	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1398  EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1399  #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1400  
1401  #ifdef CONFIG_TASKS_TRACE_RCU
1402  
1403  // Record outstanding IPIs to each CPU.  No point in sending two...
1404  static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1405  
1406  // The number of detections of task quiescent states relying on
1407  // heavyweight readers executing explicit memory barriers.
1408  static unsigned long n_heavy_reader_attempts;
1409  static unsigned long n_heavy_reader_updates;
1410  static unsigned long n_heavy_reader_ofl_updates;
1411  static unsigned long n_trc_holdouts;
1412  
1413  void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1414  DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1415  		 "RCU Tasks Trace");
1416  
1417  /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1418  static u8 rcu_ld_need_qs(struct task_struct *t)
1419  {
1420  	smp_mb(); // Enforce full grace-period ordering.
1421  	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1422  }
1423  
1424  /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1425  static void rcu_st_need_qs(struct task_struct *t, u8 v)
1426  {
1427  	smp_store_release(&t->trc_reader_special.b.need_qs, v);
1428  	smp_mb(); // Enforce full grace-period ordering.
1429  }
1430  
1431  /*
1432   * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1433   * the four-byte operand-size restriction of some platforms.
1434   * Returns the old value, which is often ignored.
1435   */
1436  u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1437  {
1438  	union rcu_special ret;
1439  	union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1440  	union rcu_special trs_new = trs_old;
1441  
1442  	if (trs_old.b.need_qs != old)
1443  		return trs_old.b.need_qs;
1444  	trs_new.b.need_qs = new;
1445  	ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1446  	return ret.b.need_qs;
1447  }
1448  EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1449  
1450  /*
1451   * If we are the last reader, signal the grace-period kthread.
1452   * Also remove from the per-CPU list of blocked tasks.
1453   */
1454  void rcu_read_unlock_trace_special(struct task_struct *t)
1455  {
1456  	unsigned long flags;
1457  	struct rcu_tasks_percpu *rtpcp;
1458  	union rcu_special trs;
1459  
1460  	// Open-coded full-word version of rcu_ld_need_qs().
1461  	smp_mb(); // Enforce full grace-period ordering.
1462  	trs = smp_load_acquire(&t->trc_reader_special);
1463  
1464  	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1465  		smp_mb(); // Pairs with update-side barriers.
1466  	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1467  	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1468  		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1469  						       TRC_NEED_QS_CHECKED);
1470  
1471  		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1472  	}
1473  	if (trs.b.blocked) {
1474  		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1475  		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1476  		list_del_init(&t->trc_blkd_node);
1477  		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1478  		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1479  	}
1480  	WRITE_ONCE(t->trc_reader_nesting, 0);
1481  }
1482  EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1483  
1484  /* Add a newly blocked reader task to its CPU's list. */
1485  void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1486  {
1487  	unsigned long flags;
1488  	struct rcu_tasks_percpu *rtpcp;
1489  
1490  	local_irq_save(flags);
1491  	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1492  	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1493  	t->trc_blkd_cpu = smp_processor_id();
1494  	if (!rtpcp->rtp_blkd_tasks.next)
1495  		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1496  	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1497  	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1498  	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1499  }
1500  EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1501  
1502  /* Add a task to the holdout list, if it is not already on the list. */
1503  static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1504  {
1505  	if (list_empty(&t->trc_holdout_list)) {
1506  		get_task_struct(t);
1507  		list_add(&t->trc_holdout_list, bhp);
1508  		n_trc_holdouts++;
1509  	}
1510  }
1511  
1512  /* Remove a task from the holdout list, if it is in fact present. */
1513  static void trc_del_holdout(struct task_struct *t)
1514  {
1515  	if (!list_empty(&t->trc_holdout_list)) {
1516  		list_del_init(&t->trc_holdout_list);
1517  		put_task_struct(t);
1518  		n_trc_holdouts--;
1519  	}
1520  }
1521  
1522  /* IPI handler to check task state. */
1523  static void trc_read_check_handler(void *t_in)
1524  {
1525  	int nesting;
1526  	struct task_struct *t = current;
1527  	struct task_struct *texp = t_in;
1528  
1529  	// If the task is no longer running on this CPU, leave.
1530  	if (unlikely(texp != t))
1531  		goto reset_ipi; // Already on holdout list, so will check later.
1532  
1533  	// If the task is not in a read-side critical section, and
1534  	// if this is the last reader, awaken the grace-period kthread.
1535  	nesting = READ_ONCE(t->trc_reader_nesting);
1536  	if (likely(!nesting)) {
1537  		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1538  		goto reset_ipi;
1539  	}
1540  	// If we are racing with an rcu_read_unlock_trace(), try again later.
1541  	if (unlikely(nesting < 0))
1542  		goto reset_ipi;
1543  
1544  	// Get here if the task is in a read-side critical section.
1545  	// Set its state so that it will update state for the grace-period
1546  	// kthread upon exit from that critical section.
1547  	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1548  
1549  reset_ipi:
1550  	// Allow future IPIs to be sent on CPU and for task.
1551  	// Also order this IPI handler against any later manipulations of
1552  	// the intended task.
1553  	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1554  	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1555  }
1556  
1557  /* Callback function for scheduler to check locked-down task.  */
1558  static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1559  {
1560  	struct list_head *bhp = bhp_in;
1561  	int cpu = task_cpu(t);
1562  	int nesting;
1563  	bool ofl = cpu_is_offline(cpu);
1564  
1565  	if (task_curr(t) && !ofl) {
1566  		// If no chance of heavyweight readers, do it the hard way.
1567  		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1568  			return -EINVAL;
1569  
1570  		// If heavyweight readers are enabled on the remote task,
1571  		// we can inspect its state even though it is currently running.
1572  		// However, we cannot safely change its state.
1573  		n_heavy_reader_attempts++;
1574  		// Check for "running" idle tasks on offline CPUs.
1575  		if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1576  			return -EINVAL; // No quiescent state, do it the hard way.
1577  		n_heavy_reader_updates++;
1578  		nesting = 0;
1579  	} else {
1580  		// The task is not running, so C-language access is safe.
1581  		nesting = t->trc_reader_nesting;
1582  		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1583  		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1584  			n_heavy_reader_ofl_updates++;
1585  	}
1586  
1587  	// If not exiting a read-side critical section, mark as checked
1588  	// so that the grace-period kthread will remove it from the
1589  	// holdout list.
1590  	if (!nesting) {
1591  		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1592  		return 0;  // In QS, so done.
1593  	}
1594  	if (nesting < 0)
1595  		return -EINVAL; // Reader transitioning, try again later.
1596  
1597  	// The task is in a read-side critical section, so set up its
1598  	// state so that it will update state upon exit from that critical
1599  	// section.
1600  	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1601  		trc_add_holdout(t, bhp);
1602  	return 0;
1603  }
1604  
1605  /* Attempt to extract the state for the specified task. */
1606  static void trc_wait_for_one_reader(struct task_struct *t,
1607  				    struct list_head *bhp)
1608  {
1609  	int cpu;
1610  
1611  	// If a previous IPI is still in flight, let it complete.
1612  	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1613  		return;
1614  
1615  	// The current task had better be in a quiescent state.
1616  	if (t == current) {
1617  		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1618  		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1619  		return;
1620  	}
1621  
1622  	// Attempt to nail down the task for inspection.
1623  	get_task_struct(t);
1624  	if (!task_call_func(t, trc_inspect_reader, bhp)) {
1625  		put_task_struct(t);
1626  		return;
1627  	}
1628  	put_task_struct(t);
1629  
1630  	// If this task is not yet on the holdout list, then we are in
1631  	// an RCU read-side critical section.  Otherwise, the invocation of
1632  	// trc_add_holdout() that added it to the list did the necessary
1633  	// get_task_struct().  Either way, the task cannot be freed out
1634  	// from under this code.
1635  
1636  	// If the task is currently running, send an IPI; either way, add it to the list.
1637  	trc_add_holdout(t, bhp);
1638  	if (task_curr(t) &&
1639  	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1640  		// The task is currently running, so try IPIing it.
1641  		cpu = task_cpu(t);
1642  
1643  		// If there is already an IPI outstanding, let it happen.
1644  		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1645  			return;
1646  
1647  		per_cpu(trc_ipi_to_cpu, cpu) = true;
1648  		t->trc_ipi_to_cpu = cpu;
1649  		rcu_tasks_trace.n_ipis++;
1650  		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1651  			// Just in case there is some other reason for
1652  			// failure than the target CPU being offline.
1653  			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
1654  				  __func__, cpu);
1655  			rcu_tasks_trace.n_ipis_fails++;
1656  			per_cpu(trc_ipi_to_cpu, cpu) = false;
1657  			t->trc_ipi_to_cpu = -1;
1658  		}
1659  	}
1660  }
1661  
1662  /*
1663   * Initialize for first-round processing for the specified task.
1664   * Return false if task is NULL or already taken care of, true otherwise.
1665   */
1666  static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1667  {
1668  	// During early boot when there is only the one boot CPU, there
1669  	// is no idle task for the other CPUs.	Also, the grace-period
1670  	// kthread is always in a quiescent state.  In addition, just return
1671  	// if this task is already on the list.
1672  	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1673  		return false;
1674  
1675  	rcu_st_need_qs(t, 0);
1676  	t->trc_ipi_to_cpu = -1;
1677  	return true;
1678  }
1679  
1680  /* Do first-round processing for the specified task. */
1681  static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1682  {
1683  	if (rcu_tasks_trace_pertask_prep(t, true))
1684  		trc_wait_for_one_reader(t, hop);
1685  }
1686  
1687  /* Initialize for a new RCU-tasks-trace grace period. */
1688  static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1689  {
1690  	LIST_HEAD(blkd_tasks);
1691  	int cpu;
1692  	unsigned long flags;
1693  	struct rcu_tasks_percpu *rtpcp;
1694  	struct task_struct *t;
1695  
1696  	// There shouldn't be any old IPIs, but...
1697  	for_each_possible_cpu(cpu)
1698  		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1699  
1700  	// Disable CPU hotplug across the CPU scan for the benefit of
1701  	// any IPIs that might be needed.  This also waits for all readers
1702  	// in CPU-hotplug code paths.
1703  	cpus_read_lock();
1704  
1705  	// These rcu_tasks_trace_pertask_prep() calls are serialized to
1706  	// allow safe access to the hop list.
1707  	for_each_online_cpu(cpu) {
1708  		rcu_read_lock();
1709  		// Note that cpu_curr_snapshot() picks up the target
1710  		// CPU's current task while its runqueue is locked with
1711  		// an smp_mb__after_spinlock().  This ensures that either
1712  		// the grace-period kthread will see that task's read-side
1713  		// critical section or the task will see the updater's pre-GP
1714  		// accesses.  The trailing smp_mb() in cpu_curr_snapshot()
1715  		// does not currently play a role other than simplify
1716  		// that function's ordering semantics.  If these simplified
1717  		// ordering semantics continue to be redundant, that smp_mb()
1718  		// might be removed.
1719  		t = cpu_curr_snapshot(cpu);
1720  		if (rcu_tasks_trace_pertask_prep(t, true))
1721  			trc_add_holdout(t, hop);
1722  		rcu_read_unlock();
1723  		cond_resched_tasks_rcu_qs();
1724  	}
1725  
1726  	// Only after all running tasks have been accounted for is it
1727  	// safe to take care of the tasks that have blocked within their
1728  	// current RCU tasks trace read-side critical section.
1729  	for_each_possible_cpu(cpu) {
1730  		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1731  		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1732  		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1733  		while (!list_empty(&blkd_tasks)) {
1734  			rcu_read_lock();
1735  			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1736  			list_del_init(&t->trc_blkd_node);
1737  			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1738  			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1739  			rcu_tasks_trace_pertask(t, hop);
1740  			rcu_read_unlock();
1741  			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1742  		}
1743  		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1744  		cond_resched_tasks_rcu_qs();
1745  	}
1746  
1747  	// Re-enable CPU hotplug now that the holdout list is populated.
1748  	cpus_read_unlock();
1749  }
1750  
1751  /*
1752   * Do intermediate processing between task and holdout scans.
1753   */
1754  static void rcu_tasks_trace_postscan(struct list_head *hop)
1755  {
1756  	// Wait for late-stage exiting tasks to finish exiting.
1757  	// These might have passed the call to exit_tasks_rcu_finish().
1758  
1759  	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1760  	synchronize_rcu();
1761  	// Any tasks that exit after this point will set
1762  	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1763  }
1764  
1765  /* Communicate task state back to the RCU tasks trace stall-warning code. */
1766  struct trc_stall_chk_rdr {
1767  	int nesting;
1768  	int ipi_to_cpu;
1769  	u8 needqs;
1770  };
1771  
1772  static int trc_check_slow_task(struct task_struct *t, void *arg)
1773  {
1774  	struct trc_stall_chk_rdr *trc_rdrp = arg;
1775  
1776  	if (task_curr(t) && cpu_online(task_cpu(t)))
1777  		return false; // It is running, so decline to inspect it.
1778  	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1779  	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1780  	trc_rdrp->needqs = rcu_ld_need_qs(t);
1781  	return true;
1782  }
1783  
1784  /* Show the state of a task stalling the current RCU tasks trace GP. */
1785  static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1786  {
1787  	int cpu;
1788  	struct trc_stall_chk_rdr trc_rdr;
1789  	bool is_idle_tsk = is_idle_task(t);
1790  
1791  	if (*firstreport) {
1792  		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1793  		*firstreport = false;
1794  	}
1795  	cpu = task_cpu(t);
1796  	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1797  		pr_alert("P%d: %c%c\n",
1798  			 t->pid,
1799  			 ".I"[t->trc_ipi_to_cpu >= 0],
1800  			 ".i"[is_idle_tsk]);
1801  	else
1802  		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1803  			 t->pid,
1804  			 ".I"[trc_rdr.ipi_to_cpu >= 0],
1805  			 ".i"[is_idle_tsk],
1806  			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1807  			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1808  			 trc_rdr.nesting,
1809  			 " !CN"[trc_rdr.needqs & 0x3],
1810  			 " ?"[trc_rdr.needqs > 0x3],
1811  			 cpu, cpu_online(cpu) ? "" : "(offline)");
1812  	sched_show_task(t);
1813  }
1814  
1815  /* List stalled IPIs for RCU tasks trace. */
1816  static void show_stalled_ipi_trace(void)
1817  {
1818  	int cpu;
1819  
1820  	for_each_possible_cpu(cpu)
1821  		if (per_cpu(trc_ipi_to_cpu, cpu))
1822  			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1823  }
1824  
1825  /* Do one scan of the holdout list. */
1826  static void check_all_holdout_tasks_trace(struct list_head *hop,
1827  					  bool needreport, bool *firstreport)
1828  {
1829  	struct task_struct *g, *t;
1830  
1831  	// Disable CPU hotplug across the holdout list scan for IPIs.
1832  	cpus_read_lock();
1833  
1834  	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1835  		// If safe and needed, try to check the current task.
1836  		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1837  		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1838  			trc_wait_for_one_reader(t, hop);
1839  
1840  		// If check succeeded, remove this task from the list.
1841  		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1842  		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1843  			trc_del_holdout(t);
1844  		else if (needreport)
1845  			show_stalled_task_trace(t, firstreport);
1846  		cond_resched_tasks_rcu_qs();
1847  	}
1848  
1849  	// Re-enable CPU hotplug now that the holdout list scan has completed.
1850  	cpus_read_unlock();
1851  
1852  	if (needreport) {
1853  		if (*firstreport)
1854  			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1855  		show_stalled_ipi_trace();
1856  	}
1857  }
1858  
1859  static void rcu_tasks_trace_empty_fn(void *unused)
1860  {
1861  }
1862  
1863  /* Wait for grace period to complete and provide ordering. */
1864  static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1865  {
1866  	int cpu;
1867  
1868  	// Wait for any lingering IPI handlers to complete.  Note that
1869  	// if a CPU has gone offline or transitioned to userspace in the
1870  	// meantime, all IPI handlers should have been drained beforehand.
1871  	// Yes, this assumes that CPUs process IPIs in order.  If that ever
1872  	// changes, there will need to be a recheck and/or timed wait.
1873  	for_each_online_cpu(cpu)
1874  		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1875  			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1876  
1877  	smp_mb(); // Caller's code must be ordered after wakeup.
1878  		  // Pairs with pretty much every ordering primitive.
1879  }
1880  
1881  /* Report any needed quiescent state for this exiting task. */
1882  static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1883  {
1884  	union rcu_special trs = READ_ONCE(t->trc_reader_special);
1885  
1886  	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1887  	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1888  	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1889  		rcu_read_unlock_trace_special(t);
1890  	else
1891  		WRITE_ONCE(t->trc_reader_nesting, 0);
1892  }
1893  
1894  /**
1895   * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1896   * @rhp: structure to be used for queueing the RCU updates.
1897   * @func: actual callback function to be invoked after the grace period
1898   *
1899   * The callback function will be invoked some time after a trace rcu-tasks
1900   * grace period elapses, in other words after all currently executing
1901   * trace rcu-tasks read-side critical sections have completed. These
1902   * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1903   * and rcu_read_unlock_trace().
1904   *
1905   * See the description of call_rcu() for more detailed information on
1906   * memory ordering guarantees.
1907   */
1908  void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1909  {
1910  	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1911  }
1912  EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
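
/*
 * Editorial sketch (not part of the original file): a typical
 * deferred-free pattern built on call_rcu_tasks_trace(), using
 * hypothetical names.  The rcu_head is embedded in the object and the
 * callback recovers the enclosing structure:
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		// payload ...
 *	};
 *
 *	static void my_obj_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	static void my_obj_retire(struct my_obj *p)
 *	{
 *		call_rcu_tasks_trace(&p->rh, my_obj_free_cb);
 *	}
 *
 * struct my_obj, my_obj_free_cb(), and my_obj_retire() are illustrative
 * names only.
 */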
1913  
1914  /**
1915   * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1916   *
1917   * Control will return to the caller some time after a trace rcu-tasks
1918   * grace period has elapsed, in other words after all currently executing
1919   * trace rcu-tasks read-side critical sections have completed. These read-side
1920   * critical sections are delimited by calls to rcu_read_lock_trace()
1921   * and rcu_read_unlock_trace().
1922   *
1923   * This is a very specialized primitive, intended only for a few uses in
1924   * tracing and other situations requiring manipulation of function preambles
1925   * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1926   * (yet) intended for heavy use from multiple CPUs.
1927   *
1928   * See the description of synchronize_rcu() for more detailed information
1929   * on memory ordering guarantees.
1930   */
1931  void synchronize_rcu_tasks_trace(void)
1932  {
1933  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1934  	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1935  }
1936  EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1937  
1938  /**
1939   * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1940   *
1941   * Although the current implementation is guaranteed to wait, it is not
1942   * obligated to do so if, for example, there are no pending callbacks.
1943   */
1944  void rcu_barrier_tasks_trace(void)
1945  {
1946  	rcu_barrier_tasks_generic(&rcu_tasks_trace);
1947  }
1948  EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
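
/*
 * Editorial note (not part of the original file): a common reason to
 * invoke rcu_barrier_tasks_trace() is module unload, where callbacks
 * queued via call_rcu_tasks_trace() must finish running before the
 * module text containing the callback function goes away.  A minimal
 * hypothetical exit handler:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		// Stop queueing new callbacks first, then ...
 *		rcu_barrier_tasks_trace();  // ... wait for those in flight.
 *	}
 *
 * my_module_exit() is an illustrative name only.
 */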
1949  
1950  int rcu_tasks_trace_lazy_ms = -1;
1951  module_param(rcu_tasks_trace_lazy_ms, int, 0444);
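
/*
 * Editorial note (not part of the original file): because this code is
 * built into the core RCU update code, the parameter above would
 * normally be set on the kernel command line, presumably with the
 * "rcupdate." prefix, for example:
 *
 *	rcupdate.rcu_tasks_trace_lazy_ms=10
 *
 * A negative value (the default) leaves lazy_jiffies at its built-in
 * default.
 */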
1952  
1953  static int __init rcu_spawn_tasks_trace_kthread(void)
1954  {
1955  	cblist_init_generic(&rcu_tasks_trace);
1956  	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1957  		rcu_tasks_trace.gp_sleep = HZ / 10;
1958  		rcu_tasks_trace.init_fract = HZ / 10;
1959  	} else {
1960  		rcu_tasks_trace.gp_sleep = HZ / 200;
1961  		if (rcu_tasks_trace.gp_sleep <= 0)
1962  			rcu_tasks_trace.gp_sleep = 1;
1963  		rcu_tasks_trace.init_fract = HZ / 200;
1964  		if (rcu_tasks_trace.init_fract <= 0)
1965  			rcu_tasks_trace.init_fract = 1;
1966  	}
1967  	if (rcu_tasks_trace_lazy_ms >= 0)
1968  		rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
1969  	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1970  	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1971  	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1972  	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1973  	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1974  	return 0;
1975  }
1976  
1977  #if !defined(CONFIG_TINY_RCU)
1978  void show_rcu_tasks_trace_gp_kthread(void)
1979  {
1980  	char buf[64];
1981  
1982  	snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
1983  		data_race(n_trc_holdouts),
1984  		data_race(n_heavy_reader_ofl_updates),
1985  		data_race(n_heavy_reader_updates),
1986  		data_race(n_heavy_reader_attempts));
1987  	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1988  }
1989  EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1990  #endif // !defined(CONFIG_TINY_RCU)
1991  
1992  struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
1993  {
1994  	return rcu_tasks_trace.kthread_ptr;
1995  }
1996  EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
1997  
1998  #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1999  static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
2000  #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
2001  
2002  #ifndef CONFIG_TINY_RCU
2003  void show_rcu_tasks_gp_kthreads(void)
2004  {
2005  	show_rcu_tasks_classic_gp_kthread();
2006  	show_rcu_tasks_rude_gp_kthread();
2007  	show_rcu_tasks_trace_gp_kthread();
2008  }
2009  #endif /* #ifndef CONFIG_TINY_RCU */
2010  
2011  #ifdef CONFIG_PROVE_RCU
2012  struct rcu_tasks_test_desc {
2013  	struct rcu_head rh;
2014  	const char *name;
2015  	bool notrun;
2016  	unsigned long runstart;
2017  };
2018  
2019  static struct rcu_tasks_test_desc tests[] = {
2020  	{
2021  		.name = "call_rcu_tasks()",
2022  		/* If not defined, the test is skipped. */
2023  		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
2024  	},
2025  	{
2026  		.name = "call_rcu_tasks_rude()",
2027  		/* If not defined, the test is skipped. */
2028  		.notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
2029  	},
2030  	{
2031  		.name = "call_rcu_tasks_trace()",
2032  		/* If not defined, the test is skipped. */
2033  		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
2034  	}
2035  };
2036  
2037  static void test_rcu_tasks_callback(struct rcu_head *rhp)
2038  {
2039  	struct rcu_tasks_test_desc *rttd =
2040  		container_of(rhp, struct rcu_tasks_test_desc, rh);
2041  
2042  	pr_info("Callback from %s invoked.\n", rttd->name);
2043  
2044  	rttd->notrun = false;
2045  }
2046  
2047  static void rcu_tasks_initiate_self_tests(void)
2048  {
2049  	pr_info("Running RCU-tasks wait API self tests\n");
2050  #ifdef CONFIG_TASKS_RCU
2051  	tests[0].runstart = jiffies;
2052  	synchronize_rcu_tasks();
2053  	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2054  #endif
2055  
2056  #ifdef CONFIG_TASKS_RUDE_RCU
2057  	tests[1].runstart = jiffies;
2058  	synchronize_rcu_tasks_rude();
2059  	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
2060  #endif
2061  
2062  #ifdef CONFIG_TASKS_TRACE_RCU
2063  	tests[2].runstart = jiffies;
2064  	synchronize_rcu_tasks_trace();
2065  	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
2066  #endif
2067  }
2068  
2069  /*
2070   * Return:  0 - test passed
2071   *	    1 - test failed, but have not timed out yet
2072   *	   -1 - test failed and timed out
2073   */
2074  static int rcu_tasks_verify_self_tests(void)
2075  {
2076  	int ret = 0;
2077  	int i;
2078  	unsigned long bst = rcu_task_stall_timeout;
2079  
2080  	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2081  		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
2082  	for (i = 0; i < ARRAY_SIZE(tests); i++) {
2083  		while (tests[i].notrun) {		// still hanging.
2084  			if (time_after(jiffies, tests[i].runstart + bst)) {
2085  				pr_err("%s has failed boot-time tests.\n", tests[i].name);
2086  				ret = -1;
2087  				break;
2088  			}
2089  			ret = 1;
2090  			break;
2091  		}
2092  	}
2093  	WARN_ON(ret < 0);
2094  
2095  	return ret;
2096  }
2097  
2098  /*
2099   * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2100   * test passes or has timed out.
2101   */
2102  static struct delayed_work rcu_tasks_verify_work;
2103  static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2104  {
2105  	int ret = rcu_tasks_verify_self_tests();
2106  
2107  	if (ret <= 0)
2108  		return;
2109  
2110  	/* Test failed but has not timed out yet; reschedule another check. */
2111  	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2112  }
2113  
2114  static int rcu_tasks_verify_schedule_work(void)
2115  {
2116  	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2117  	rcu_tasks_verify_work_fn(NULL);
2118  	return 0;
2119  }
2120  late_initcall(rcu_tasks_verify_schedule_work);
2121  #else /* #ifdef CONFIG_PROVE_RCU */
2122  static void rcu_tasks_initiate_self_tests(void) { }
2123  #endif /* #else #ifdef CONFIG_PROVE_RCU */
2124  
2125  void __init rcu_init_tasks_generic(void)
2126  {
2127  #ifdef CONFIG_TASKS_RCU
2128  	rcu_spawn_tasks_kthread();
2129  #endif
2130  
2131  #ifdef CONFIG_TASKS_RUDE_RCU
2132  	rcu_spawn_tasks_rude_kthread();
2133  #endif
2134  
2135  #ifdef CONFIG_TASKS_TRACE_RCU
2136  	rcu_spawn_tasks_trace_kthread();
2137  #endif
2138  
2139  	// Run the self-tests.
2140  	rcu_tasks_initiate_self_tests();
2141  }
2142  
2143  #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2144  static inline void rcu_tasks_bootup_oddness(void) {}
2145  #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
2146