xref: /openbmc/linux/kernel/rcu/tasks.h (revision 5f8b7d4b2e9604d03ae06f1a2dd5a1f34c33e533)
1eacd6f04SPaul E. McKenney /* SPDX-License-Identifier: GPL-2.0+ */
2eacd6f04SPaul E. McKenney /*
3eacd6f04SPaul E. McKenney  * Task-based RCU implementations.
4eacd6f04SPaul E. McKenney  *
5eacd6f04SPaul E. McKenney  * Copyright (C) 2020 Paul E. McKenney
6eacd6f04SPaul E. McKenney  */
7eacd6f04SPaul E. McKenney 
88fd8ca38SPaul E. McKenney #ifdef CONFIG_TASKS_RCU_GENERIC
99b073de1SPaul E. McKenney #include "rcu_segcblist.h"
105873b8a9SPaul E. McKenney 
115873b8a9SPaul E. McKenney ////////////////////////////////////////////////////////////////////////
125873b8a9SPaul E. McKenney //
135873b8a9SPaul E. McKenney // Generic data structures.
145873b8a9SPaul E. McKenney 
155873b8a9SPaul E. McKenney struct rcu_tasks;
165873b8a9SPaul E. McKenney typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
177460ade1SPaul E. McKenney typedef void (*pregp_func_t)(struct list_head *hop);
18e4fe5dd6SPaul E. McKenney typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
199796e1aeSPaul E. McKenney typedef void (*postscan_func_t)(struct list_head *hop);
20e4fe5dd6SPaul E. McKenney typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21af051ca4SPaul E. McKenney typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
22eacd6f04SPaul E. McKenney 
2307e10515SPaul E. McKenney /**
24cafafd67SPaul E. McKenney  * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
259b073de1SPaul E. McKenney  * @cblist: Callback list.
26381a4f3bSPaul E. McKenney  * @lock: Lock protecting per-CPU callback list.
277d13d30bSPaul E. McKenney  * @rtp_jiffies: Jiffies counter value for statistics.
28d119357dSPaul E. McKenney  * @lazy_timer: Timer to unlazify callbacks.
29d119357dSPaul E. McKenney  * @urgent_gp: Number of additional non-lazy grace periods.
307d13d30bSPaul E. McKenney  * @rtp_n_lock_retries: Rough lock-contention statistic.
31d363f833SPaul E. McKenney  * @rtp_work: Work queue for invoking callbacks.
323063b33aSPaul E. McKenney  * @rtp_irq_work: IRQ work queue for deferred wakeups.
33ce9b1c66SPaul E. McKenney  * @barrier_q_head: RCU callback for barrier operation.
34434c9eefSPaul E. McKenney  * @rtp_blkd_tasks: List of tasks blocked as readers.
35dc5d4d4cSPaul E. McKenney  * @rtp_exit_list: List of tasks in the latter portion of do_exit().
36ce9b1c66SPaul E. McKenney  * @cpu: CPU number corresponding to this entry.
37*b3b2431eSZqiang  * @index: Index of this CPU in the rtpcp_array of the rcu_tasks structure.
38ce9b1c66SPaul E. McKenney  * @rtpp: Pointer to the rcu_tasks structure.
39cafafd67SPaul E. McKenney  */
40cafafd67SPaul E. McKenney struct rcu_tasks_percpu {
419b073de1SPaul E. McKenney 	struct rcu_segcblist cblist;
42381a4f3bSPaul E. McKenney 	raw_spinlock_t __private lock;
437d13d30bSPaul E. McKenney 	unsigned long rtp_jiffies;
447d13d30bSPaul E. McKenney 	unsigned long rtp_n_lock_retries;
45d119357dSPaul E. McKenney 	struct timer_list lazy_timer;
46d119357dSPaul E. McKenney 	unsigned int urgent_gp;
47d363f833SPaul E. McKenney 	struct work_struct rtp_work;
483063b33aSPaul E. McKenney 	struct irq_work rtp_irq_work;
49ce9b1c66SPaul E. McKenney 	struct rcu_head barrier_q_head;
50434c9eefSPaul E. McKenney 	struct list_head rtp_blkd_tasks;
51dc5d4d4cSPaul E. McKenney 	struct list_head rtp_exit_list;
52d363f833SPaul E. McKenney 	int cpu;
53*b3b2431eSZqiang 	int index;
54d363f833SPaul E. McKenney 	struct rcu_tasks *rtpp;
55cafafd67SPaul E. McKenney };
56cafafd67SPaul E. McKenney 
57cafafd67SPaul E. McKenney /**
58cafafd67SPaul E. McKenney  * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
5988db792bSSebastian Andrzej Siewior  * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
60cafafd67SPaul E. McKenney  * @cbs_gbl_lock: Lock protecting callback list.
61d96225fdSPaul E. McKenney  * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
625873b8a9SPaul E. McKenney  * @gp_func: This flavor's grace-period-wait function.
63af051ca4SPaul E. McKenney  * @gp_state: Grace period's most recent state transition (debugging).
644fe192dfSPaul E. McKenney  * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
652393a613SPaul E. McKenney  * @init_fract: Initial backoff sleep interval.
66af051ca4SPaul E. McKenney  * @gp_jiffies: Time of last @gp_state transition.
67af051ca4SPaul E. McKenney  * @gp_start: Most recent grace-period start in jiffies.
68b14fb4fbSPaul E. McKenney  * @tasks_gp_seq: Number of grace periods completed since boot.
69238dbce3SPaul E. McKenney  * @n_ipis: Number of IPIs sent to encourage grace periods to end.
707e0669c3SPaul E. McKenney  * @n_ipis_fails: Number of IPI-send failures.
71d119357dSPaul E. McKenney  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
72d119357dSPaul E. McKenney  * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
73e4fe5dd6SPaul E. McKenney  * @pregp_func: This flavor's pre-grace-period function (optional).
74e4fe5dd6SPaul E. McKenney  * @pertask_func: This flavor's per-task scan function (optional).
75e4fe5dd6SPaul E. McKenney  * @postscan_func: This flavor's post-task scan function (optional).
7685b86994SLukas Bulwahn  * @holdouts_func: This flavor's holdout-list scan function (optional).
77e4fe5dd6SPaul E. McKenney  * @postgp_func: This flavor's post-grace-period function (optional).
785873b8a9SPaul E. McKenney  * @call_func: This flavor's call_rcu()-equivalent function.
79cafafd67SPaul E. McKenney  * @rtpcpu: This flavor's rcu_tasks_percpu structure.
80*b3b2431eSZqiang  * @rtpcp_array: Array of pointers to the rcu_tasks_percpu structures of the CPUs in cpu_possible_mask.
817a30871bSPaul E. McKenney  * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
822cee0789SPaul E. McKenney  * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
832cee0789SPaul E. McKenney  * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
84fd796e41SPaul E. McKenney  * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
85ce9b1c66SPaul E. McKenney  * @barrier_q_mutex: Serialize barrier operations.
86ce9b1c66SPaul E. McKenney  * @barrier_q_count: Number of queues being waited on.
87ce9b1c66SPaul E. McKenney  * @barrier_q_completion: Barrier wait/wakeup mechanism.
88ce9b1c66SPaul E. McKenney  * @barrier_q_seq: Sequence number for barrier operations.
89c97d12a6SPaul E. McKenney  * @name: This flavor's textual name.
90c97d12a6SPaul E. McKenney  * @kname: This flavor's kthread name.
9107e10515SPaul E. McKenney  */
9207e10515SPaul E. McKenney struct rcu_tasks {
9388db792bSSebastian Andrzej Siewior 	struct rcuwait cbs_wait;
94cafafd67SPaul E. McKenney 	raw_spinlock_t cbs_gbl_lock;
95d96225fdSPaul E. McKenney 	struct mutex tasks_gp_mutex;
96af051ca4SPaul E. McKenney 	int gp_state;
974fe192dfSPaul E. McKenney 	int gp_sleep;
982393a613SPaul E. McKenney 	int init_fract;
99af051ca4SPaul E. McKenney 	unsigned long gp_jiffies;
10088092d0cSPaul E. McKenney 	unsigned long gp_start;
101b14fb4fbSPaul E. McKenney 	unsigned long tasks_gp_seq;
102238dbce3SPaul E. McKenney 	unsigned long n_ipis;
1037e0669c3SPaul E. McKenney 	unsigned long n_ipis_fails;
10407e10515SPaul E. McKenney 	struct task_struct *kthread_ptr;
105d119357dSPaul E. McKenney 	unsigned long lazy_jiffies;
1065873b8a9SPaul E. McKenney 	rcu_tasks_gp_func_t gp_func;
107e4fe5dd6SPaul E. McKenney 	pregp_func_t pregp_func;
108e4fe5dd6SPaul E. McKenney 	pertask_func_t pertask_func;
109e4fe5dd6SPaul E. McKenney 	postscan_func_t postscan_func;
110e4fe5dd6SPaul E. McKenney 	holdouts_func_t holdouts_func;
111e4fe5dd6SPaul E. McKenney 	postgp_func_t postgp_func;
1125873b8a9SPaul E. McKenney 	call_rcu_func_t call_func;
113cafafd67SPaul E. McKenney 	struct rcu_tasks_percpu __percpu *rtpcpu;
114*b3b2431eSZqiang 	struct rcu_tasks_percpu **rtpcp_array;
1157a30871bSPaul E. McKenney 	int percpu_enqueue_shift;
1168dd593fdSPaul E. McKenney 	int percpu_enqueue_lim;
1172cee0789SPaul E. McKenney 	int percpu_dequeue_lim;
118fd796e41SPaul E. McKenney 	unsigned long percpu_dequeue_gpseq;
119ce9b1c66SPaul E. McKenney 	struct mutex barrier_q_mutex;
120ce9b1c66SPaul E. McKenney 	atomic_t barrier_q_count;
121ce9b1c66SPaul E. McKenney 	struct completion barrier_q_completion;
122ce9b1c66SPaul E. McKenney 	unsigned long barrier_q_seq;
123c97d12a6SPaul E. McKenney 	char *name;
124c97d12a6SPaul E. McKenney 	char *kname;
12507e10515SPaul E. McKenney };
12607e10515SPaul E. McKenney 
1273063b33aSPaul E. McKenney static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
1283063b33aSPaul E. McKenney 
129c97d12a6SPaul E. McKenney #define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
130cafafd67SPaul E. McKenney static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {			\
131381a4f3bSPaul E. McKenney 	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
13288db792bSSebastian Andrzej Siewior 	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
133cafafd67SPaul E. McKenney };											\
134c97d12a6SPaul E. McKenney static struct rcu_tasks rt_name =							\
13507e10515SPaul E. McKenney {											\
13688db792bSSebastian Andrzej Siewior 	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
137cafafd67SPaul E. McKenney 	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
138d96225fdSPaul E. McKenney 	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
1395873b8a9SPaul E. McKenney 	.gp_func = gp,									\
1405873b8a9SPaul E. McKenney 	.call_func = call,								\
141cafafd67SPaul E. McKenney 	.rtpcpu = &rt_name ## __percpu,							\
142d119357dSPaul E. McKenney 	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),						\
143c97d12a6SPaul E. McKenney 	.name = n,									\
1442bcd18e0SPaul E. McKenney 	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
1458dd593fdSPaul E. McKenney 	.percpu_enqueue_lim = 1,							\
1462cee0789SPaul E. McKenney 	.percpu_dequeue_lim = 1,							\
147ce9b1c66SPaul E. McKenney 	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
148ce9b1c66SPaul E. McKenney 	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
149c97d12a6SPaul E. McKenney 	.kname = #rt_name,								\
15007e10515SPaul E. McKenney }
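// For example (illustrative only; these are the names used by the classic
// Tasks RCU flavor defined later in this file), a flavor is instantiated as:
//
//	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
//
// which emits both the rcu_tasks__percpu per-CPU structures and the global
// rcu_tasks structure, with ->rtpcpu pointing at the per-CPU data.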
15107e10515SPaul E. McKenney 
1522b4be548SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
153eacd6f04SPaul E. McKenney /* Track exiting tasks in order to allow them to be waited for. */
154eacd6f04SPaul E. McKenney DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
155eacd6f04SPaul E. McKenney 
156a4533cc0SNeeraj Upadhyay /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
157a4533cc0SNeeraj Upadhyay static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
158a4533cc0SNeeraj Upadhyay static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
159a4533cc0SNeeraj Upadhyay #endif
160a4533cc0SNeeraj Upadhyay 
161b0afa0f0SPaul E. McKenney /* Avoid IPIing CPUs early in the grace period. */
162574de876SPaul E. McKenney #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
163b0afa0f0SPaul E. McKenney static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
164b0afa0f0SPaul E. McKenney module_param(rcu_task_ipi_delay, int, 0644);
165b0afa0f0SPaul E. McKenney 
166eacd6f04SPaul E. McKenney /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
1671cf1144eSPaul E. McKenney #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
168eacd6f04SPaul E. McKenney #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
169eacd6f04SPaul E. McKenney static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
170eacd6f04SPaul E. McKenney module_param(rcu_task_stall_timeout, int, 0644);
171f2539003SPaul E. McKenney #define RCU_TASK_STALL_INFO (HZ * 10)
172f2539003SPaul E. McKenney static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
173f2539003SPaul E. McKenney module_param(rcu_task_stall_info, int, 0644);
174f2539003SPaul E. McKenney static int rcu_task_stall_info_mult __read_mostly = 3;
175f2539003SPaul E. McKenney module_param(rcu_task_stall_info_mult, int, 0444);
176eacd6f04SPaul E. McKenney 
1778610b656SPaul E. McKenney static int rcu_task_enqueue_lim __read_mostly = -1;
1788610b656SPaul E. McKenney module_param(rcu_task_enqueue_lim, int, 0444);
1798610b656SPaul E. McKenney 
180ab97152fSPaul E. McKenney static bool rcu_task_cb_adjust;
181ab97152fSPaul E. McKenney static int rcu_task_contend_lim __read_mostly = 100;
182ab97152fSPaul E. McKenney module_param(rcu_task_contend_lim, int, 0444);
183fd796e41SPaul E. McKenney static int rcu_task_collapse_lim __read_mostly = 10;
184fd796e41SPaul E. McKenney module_param(rcu_task_collapse_lim, int, 0444);
185db13710aSPaul E. McKenney static int rcu_task_lazy_lim __read_mostly = 32;
186db13710aSPaul E. McKenney module_param(rcu_task_lazy_lim, int, 0444);
187ab97152fSPaul E. McKenney 
188*b3b2431eSZqiang static int rcu_task_cpu_ids;
189*b3b2431eSZqiang 
190af051ca4SPaul E. McKenney /* RCU tasks grace-period state for debugging. */
191af051ca4SPaul E. McKenney #define RTGS_INIT		 0
192af051ca4SPaul E. McKenney #define RTGS_WAIT_WAIT_CBS	 1
193af051ca4SPaul E. McKenney #define RTGS_WAIT_GP		 2
194af051ca4SPaul E. McKenney #define RTGS_PRE_WAIT_GP	 3
195af051ca4SPaul E. McKenney #define RTGS_SCAN_TASKLIST	 4
196af051ca4SPaul E. McKenney #define RTGS_POST_SCAN_TASKLIST	 5
197af051ca4SPaul E. McKenney #define RTGS_WAIT_SCAN_HOLDOUTS	 6
198af051ca4SPaul E. McKenney #define RTGS_SCAN_HOLDOUTS	 7
199af051ca4SPaul E. McKenney #define RTGS_POST_GP		 8
200af051ca4SPaul E. McKenney #define RTGS_WAIT_READERS	 9
201af051ca4SPaul E. McKenney #define RTGS_INVOKE_CBS		10
202af051ca4SPaul E. McKenney #define RTGS_WAIT_CBS		11
2038344496eSPaul E. McKenney #ifndef CONFIG_TINY_RCU
204af051ca4SPaul E. McKenney static const char * const rcu_tasks_gp_state_names[] = {
205af051ca4SPaul E. McKenney 	"RTGS_INIT",
206af051ca4SPaul E. McKenney 	"RTGS_WAIT_WAIT_CBS",
207af051ca4SPaul E. McKenney 	"RTGS_WAIT_GP",
208af051ca4SPaul E. McKenney 	"RTGS_PRE_WAIT_GP",
209af051ca4SPaul E. McKenney 	"RTGS_SCAN_TASKLIST",
210af051ca4SPaul E. McKenney 	"RTGS_POST_SCAN_TASKLIST",
211af051ca4SPaul E. McKenney 	"RTGS_WAIT_SCAN_HOLDOUTS",
212af051ca4SPaul E. McKenney 	"RTGS_SCAN_HOLDOUTS",
213af051ca4SPaul E. McKenney 	"RTGS_POST_GP",
214af051ca4SPaul E. McKenney 	"RTGS_WAIT_READERS",
215af051ca4SPaul E. McKenney 	"RTGS_INVOKE_CBS",
216af051ca4SPaul E. McKenney 	"RTGS_WAIT_CBS",
217af051ca4SPaul E. McKenney };
2188344496eSPaul E. McKenney #endif /* #ifndef CONFIG_TINY_RCU */
219af051ca4SPaul E. McKenney 
2205873b8a9SPaul E. McKenney ////////////////////////////////////////////////////////////////////////
2215873b8a9SPaul E. McKenney //
2225873b8a9SPaul E. McKenney // Generic code.
2235873b8a9SPaul E. McKenney 
224d363f833SPaul E. McKenney static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
225d363f833SPaul E. McKenney 
226af051ca4SPaul E. McKenney /* Record grace-period phase and time. */
227af051ca4SPaul E. McKenney static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
228af051ca4SPaul E. McKenney {
229af051ca4SPaul E. McKenney 	rtp->gp_state = newstate;
230af051ca4SPaul E. McKenney 	rtp->gp_jiffies = jiffies;
231af051ca4SPaul E. McKenney }
232af051ca4SPaul E. McKenney 
2338344496eSPaul E. McKenney #ifndef CONFIG_TINY_RCU
234af051ca4SPaul E. McKenney /* Return state name. */
235af051ca4SPaul E. McKenney static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
236af051ca4SPaul E. McKenney {
237af051ca4SPaul E. McKenney 	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
238af051ca4SPaul E. McKenney 	int j = READ_ONCE(i); // Prevent the compiler from reading twice
239af051ca4SPaul E. McKenney 
240af051ca4SPaul E. McKenney 	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
241af051ca4SPaul E. McKenney 		return "???";
242af051ca4SPaul E. McKenney 	return rcu_tasks_gp_state_names[j];
243af051ca4SPaul E. McKenney }
2448344496eSPaul E. McKenney #endif /* #ifndef CONFIG_TINY_RCU */
245af051ca4SPaul E. McKenney 
246cafafd67SPaul E. McKenney // Initialize per-CPU callback lists for the specified flavor of
247cb88f7f5SPaul E. McKenney // Tasks RCU.  Do not enqueue callbacks before this function is invoked.
248cafafd67SPaul E. McKenney static void cblist_init_generic(struct rcu_tasks *rtp)
249cafafd67SPaul E. McKenney {
250cafafd67SPaul E. McKenney 	int cpu;
251cafafd67SPaul E. McKenney 	unsigned long flags;
2528610b656SPaul E. McKenney 	int lim;
253da123016SPaul E. McKenney 	int shift;
254*b3b2431eSZqiang 	int maxcpu;
255*b3b2431eSZqiang 	int index = 0;
256cafafd67SPaul E. McKenney 
257ab97152fSPaul E. McKenney 	if (rcu_task_enqueue_lim < 0) {
2588610b656SPaul E. McKenney 		rcu_task_enqueue_lim = 1;
259ab97152fSPaul E. McKenney 		rcu_task_cb_adjust = true;
260ab97152fSPaul E. McKenney 	} else if (rcu_task_enqueue_lim == 0) {
261ab97152fSPaul E. McKenney 		rcu_task_enqueue_lim = 1;
262ab97152fSPaul E. McKenney 	}
2638610b656SPaul E. McKenney 	lim = rcu_task_enqueue_lim;
2648610b656SPaul E. McKenney 
265*b3b2431eSZqiang 	rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
266*b3b2431eSZqiang 	BUG_ON(!rtp->rtpcp_array);
267*b3b2431eSZqiang 
268cafafd67SPaul E. McKenney 	for_each_possible_cpu(cpu) {
269cafafd67SPaul E. McKenney 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
270cafafd67SPaul E. McKenney 
271cafafd67SPaul E. McKenney 		WARN_ON_ONCE(!rtpcp);
272cafafd67SPaul E. McKenney 		if (cpu)
273381a4f3bSPaul E. McKenney 			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
274cb88f7f5SPaul E. McKenney 		local_irq_save(flags);  // serialize initialization
2759b073de1SPaul E. McKenney 		if (rcu_segcblist_empty(&rtpcp->cblist))
2769b073de1SPaul E. McKenney 			rcu_segcblist_init(&rtpcp->cblist);
277cb88f7f5SPaul E. McKenney 		local_irq_restore(flags);
278d363f833SPaul E. McKenney 		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
279d363f833SPaul E. McKenney 		rtpcp->cpu = cpu;
280d363f833SPaul E. McKenney 		rtpcp->rtpp = rtp;
281*b3b2431eSZqiang 		rtpcp->index = index;
282*b3b2431eSZqiang 		rtp->rtpcp_array[index] = rtpcp;
283*b3b2431eSZqiang 		index++;
284434c9eefSPaul E. McKenney 		if (!rtpcp->rtp_blkd_tasks.next)
285434c9eefSPaul E. McKenney 			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
2867679283eSPaul E. McKenney 		if (!rtpcp->rtp_exit_list.next)
2877679283eSPaul E. McKenney 			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
288*b3b2431eSZqiang 		maxcpu = cpu;
289cafafd67SPaul E. McKenney 	}
2905fc8cbe4SShigeru Yoshida 
291*b3b2431eSZqiang 	rcu_task_cpu_ids = maxcpu + 1;
292*b3b2431eSZqiang 	if (lim > rcu_task_cpu_ids)
293*b3b2431eSZqiang 		lim = rcu_task_cpu_ids;
294*b3b2431eSZqiang 	shift = ilog2(rcu_task_cpu_ids / lim);
295*b3b2431eSZqiang 	if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
296*b3b2431eSZqiang 		shift++;
297*b3b2431eSZqiang 	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
298*b3b2431eSZqiang 	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
299*b3b2431eSZqiang 	smp_store_release(&rtp->percpu_enqueue_lim, lim);
300*b3b2431eSZqiang 
301*b3b2431eSZqiang 	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
302*b3b2431eSZqiang 			rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
303*b3b2431eSZqiang 			rcu_task_cb_adjust, rcu_task_cpu_ids);
304cafafd67SPaul E. McKenney }
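// Worked example of the shift/lim computation above: with rcu_task_cpu_ids
// equal to 8 and lim equal to 1, shift = ilog2(8 / 1) = 3, and (8 - 1) >> 3
// is 0, which is below lim, so shift stays 3 and every CPU ID maps to
// queue 0.  With lim equal to 3, shift = ilog2(8 / 3) = 1, but (8 - 1) >> 1
// is 3 >= lim, so shift becomes 2, mapping CPUs 0-3 to queue 0 and CPUs 4-7
// to queue 1, both below the enqueue limit.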
305cafafd67SPaul E. McKenney 
306d119357dSPaul E. McKenney // Compute wakeup time for lazy callback timer.
307d119357dSPaul E. McKenney static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
308d119357dSPaul E. McKenney {
309d119357dSPaul E. McKenney 	return jiffies + rtp->lazy_jiffies;
310d119357dSPaul E. McKenney }
311d119357dSPaul E. McKenney 
312d119357dSPaul E. McKenney // Timer handler that unlazifies lazy callbacks.
313d119357dSPaul E. McKenney static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
314d119357dSPaul E. McKenney {
315d119357dSPaul E. McKenney 	unsigned long flags;
316d119357dSPaul E. McKenney 	bool needwake = false;
317d119357dSPaul E. McKenney 	struct rcu_tasks *rtp;
318d119357dSPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
319d119357dSPaul E. McKenney 
320d119357dSPaul E. McKenney 	rtp = rtpcp->rtpp;
321d119357dSPaul E. McKenney 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
322d119357dSPaul E. McKenney 	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
323d119357dSPaul E. McKenney 		if (!rtpcp->urgent_gp)
324d119357dSPaul E. McKenney 			rtpcp->urgent_gp = 1;
325d119357dSPaul E. McKenney 		needwake = true;
326d119357dSPaul E. McKenney 		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
327d119357dSPaul E. McKenney 	}
328d119357dSPaul E. McKenney 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
329d119357dSPaul E. McKenney 	if (needwake)
330d119357dSPaul E. McKenney 		rcuwait_wake_up(&rtp->cbs_wait);
331d119357dSPaul E. McKenney }
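// Summarizing the lazy path (see also call_rcu_tasks_generic() below): a
// newly queued callback is left lazy unless it is a synchronous waiter
// (wakeme_after_rcu) or its queue has reached rcu_task_lazy_lim callbacks.
// Otherwise, the first lazy callback arms lazy_timer for lazy_jiffies
// (HZ/4 by default), and this timer handler then sets ->urgent_gp and
// wakes the grace-period kthread.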
332d119357dSPaul E. McKenney 
3333063b33aSPaul E. McKenney // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
3343063b33aSPaul E. McKenney static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
3353063b33aSPaul E. McKenney {
3363063b33aSPaul E. McKenney 	struct rcu_tasks *rtp;
3373063b33aSPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
3383063b33aSPaul E. McKenney 
3393063b33aSPaul E. McKenney 	rtp = rtpcp->rtpp;
34088db792bSSebastian Andrzej Siewior 	rcuwait_wake_up(&rtp->cbs_wait);
3413063b33aSPaul E. McKenney }
3423063b33aSPaul E. McKenney 
3435873b8a9SPaul E. McKenney // Enqueue a callback for the specified flavor of Tasks RCU.
3445873b8a9SPaul E. McKenney static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
3455873b8a9SPaul E. McKenney 				   struct rcu_tasks *rtp)
346eacd6f04SPaul E. McKenney {
34707d95c34SEric Dumazet 	int chosen_cpu;
348eacd6f04SPaul E. McKenney 	unsigned long flags;
349d119357dSPaul E. McKenney 	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
35007d95c34SEric Dumazet 	int ideal_cpu;
3517d13d30bSPaul E. McKenney 	unsigned long j;
352ab97152fSPaul E. McKenney 	bool needadjust = false;
353eacd6f04SPaul E. McKenney 	bool needwake;
354cafafd67SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp;
355eacd6f04SPaul E. McKenney 
356eacd6f04SPaul E. McKenney 	rhp->next = NULL;
357eacd6f04SPaul E. McKenney 	rhp->func = func;
358cafafd67SPaul E. McKenney 	local_irq_save(flags);
359fd796e41SPaul E. McKenney 	rcu_read_lock();
36007d95c34SEric Dumazet 	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
36107d95c34SEric Dumazet 	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
36207d95c34SEric Dumazet 	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
3637d13d30bSPaul E. McKenney 	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
364381a4f3bSPaul E. McKenney 		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
3657d13d30bSPaul E. McKenney 		j = jiffies;
3667d13d30bSPaul E. McKenney 		if (rtpcp->rtp_jiffies != j) {
3677d13d30bSPaul E. McKenney 			rtpcp->rtp_jiffies = j;
3687d13d30bSPaul E. McKenney 			rtpcp->rtp_n_lock_retries = 0;
3697d13d30bSPaul E. McKenney 		}
370ab97152fSPaul E. McKenney 		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
371*b3b2431eSZqiang 		    READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
372ab97152fSPaul E. McKenney 			needadjust = true;  // Defer adjustment to avoid deadlock.
3737d13d30bSPaul E. McKenney 	}
374cb88f7f5SPaul E. McKenney 	// Queuing callbacks before initialization not yet supported.
375cb88f7f5SPaul E. McKenney 	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
376cb88f7f5SPaul E. McKenney 		rcu_segcblist_init(&rtpcp->cblist);
377db13710aSPaul E. McKenney 	needwake = (func == wakeme_after_rcu) ||
378db13710aSPaul E. McKenney 		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
379db13710aSPaul E. McKenney 	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
380d119357dSPaul E. McKenney 		if (rtp->lazy_jiffies)
381d119357dSPaul E. McKenney 			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
382d119357dSPaul E. McKenney 		else
3839b073de1SPaul E. McKenney 			needwake = rcu_segcblist_empty(&rtpcp->cblist);
384d119357dSPaul E. McKenney 	}
385d119357dSPaul E. McKenney 	if (needwake)
386d119357dSPaul E. McKenney 		rtpcp->urgent_gp = 3;
3879b073de1SPaul E. McKenney 	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
388381a4f3bSPaul E. McKenney 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
389ab97152fSPaul E. McKenney 	if (unlikely(needadjust)) {
390ab97152fSPaul E. McKenney 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
391*b3b2431eSZqiang 		if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
39200a8b4b5SPaul E. McKenney 			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
393*b3b2431eSZqiang 			WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
394*b3b2431eSZqiang 			smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
395ab97152fSPaul E. McKenney 			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
396ab97152fSPaul E. McKenney 		}
397ab97152fSPaul E. McKenney 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
398ab97152fSPaul E. McKenney 	}
399fd796e41SPaul E. McKenney 	rcu_read_unlock();
400eacd6f04SPaul E. McKenney 	/* We can't create the thread unless interrupts are enabled. */
40107e10515SPaul E. McKenney 	if (needwake && READ_ONCE(rtp->kthread_ptr))
4023063b33aSPaul E. McKenney 		irq_work_queue(&rtpcp->rtp_irq_work);
403eacd6f04SPaul E. McKenney }
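// Illustrative sketch of a flavor-specific enqueue wrapper (this mirrors the
// shape of the classic flavor's call_rcu_tasks() defined later in this file;
// shown here only as an example):
//
//	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
//	{
//		call_rcu_tasks_generic(rhp, func, &rcu_tasks);
//	}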
404eacd6f04SPaul E. McKenney 
405ce9b1c66SPaul E. McKenney // RCU callback function for rcu_barrier_tasks_generic().
406ce9b1c66SPaul E. McKenney static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
407ce9b1c66SPaul E. McKenney {
408ce9b1c66SPaul E. McKenney 	struct rcu_tasks *rtp;
409ce9b1c66SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp;
410ce9b1c66SPaul E. McKenney 
411ce9b1c66SPaul E. McKenney 	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
412ce9b1c66SPaul E. McKenney 	rtp = rtpcp->rtpp;
413ce9b1c66SPaul E. McKenney 	if (atomic_dec_and_test(&rtp->barrier_q_count))
414ce9b1c66SPaul E. McKenney 		complete(&rtp->barrier_q_completion);
415ce9b1c66SPaul E. McKenney }
416ce9b1c66SPaul E. McKenney 
417ce9b1c66SPaul E. McKenney // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
418ce9b1c66SPaul E. McKenney // Operates in a manner similar to rcu_barrier().
419ce9b1c66SPaul E. McKenney static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
420ce9b1c66SPaul E. McKenney {
421ce9b1c66SPaul E. McKenney 	int cpu;
422ce9b1c66SPaul E. McKenney 	unsigned long flags;
423ce9b1c66SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp;
424ce9b1c66SPaul E. McKenney 	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
425ce9b1c66SPaul E. McKenney 
426ce9b1c66SPaul E. McKenney 	mutex_lock(&rtp->barrier_q_mutex);
427ce9b1c66SPaul E. McKenney 	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
428ce9b1c66SPaul E. McKenney 		smp_mb();
429ce9b1c66SPaul E. McKenney 		mutex_unlock(&rtp->barrier_q_mutex);
430ce9b1c66SPaul E. McKenney 		return;
431ce9b1c66SPaul E. McKenney 	}
432ce9b1c66SPaul E. McKenney 	rcu_seq_start(&rtp->barrier_q_seq);
433ce9b1c66SPaul E. McKenney 	init_completion(&rtp->barrier_q_completion);
434ce9b1c66SPaul E. McKenney 	atomic_set(&rtp->barrier_q_count, 2);
435ce9b1c66SPaul E. McKenney 	for_each_possible_cpu(cpu) {
4362cee0789SPaul E. McKenney 		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
437ce9b1c66SPaul E. McKenney 			break;
438ce9b1c66SPaul E. McKenney 		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
439ce9b1c66SPaul E. McKenney 		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
440ce9b1c66SPaul E. McKenney 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
441ce9b1c66SPaul E. McKenney 		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
442ce9b1c66SPaul E. McKenney 			atomic_inc(&rtp->barrier_q_count);
443ce9b1c66SPaul E. McKenney 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
444ce9b1c66SPaul E. McKenney 	}
445ce9b1c66SPaul E. McKenney 	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
446ce9b1c66SPaul E. McKenney 		complete(&rtp->barrier_q_completion);
447ce9b1c66SPaul E. McKenney 	wait_for_completion(&rtp->barrier_q_completion);
448ce9b1c66SPaul E. McKenney 	rcu_seq_end(&rtp->barrier_q_seq);
449ce9b1c66SPaul E. McKenney 	mutex_unlock(&rtp->barrier_q_mutex);
450ce9b1c66SPaul E. McKenney }
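// The initial barrier_q_count of 2 (removed by the atomic_sub_and_test()
// above) is a bias that prevents the completion from firing while callbacks
// are still being entrained.  Illustrative sketch of a flavor-specific
// wrapper (the classic flavor's rcu_barrier_tasks() later in this file has
// this shape):
//
//	void rcu_barrier_tasks(void)
//	{
//		rcu_barrier_tasks_generic(&rcu_tasks);
//	}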
451ce9b1c66SPaul E. McKenney 
4524d1114c0SPaul E. McKenney // Advance callbacks and indicate whether either a grace period or
4534d1114c0SPaul E. McKenney // callback invocation is needed.
4544d1114c0SPaul E. McKenney static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
4554d1114c0SPaul E. McKenney {
4564d1114c0SPaul E. McKenney 	int cpu;
45701a2b99fSPaul E. McKenney 	int dequeue_limit;
4584d1114c0SPaul E. McKenney 	unsigned long flags;
459a4fcfbeeSZqiang 	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
460fd796e41SPaul E. McKenney 	long n;
461fd796e41SPaul E. McKenney 	long ncbs = 0;
462fd796e41SPaul E. McKenney 	long ncbsnz = 0;
4634d1114c0SPaul E. McKenney 	int needgpcb = 0;
4644d1114c0SPaul E. McKenney 
46501a2b99fSPaul E. McKenney 	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
46601a2b99fSPaul E. McKenney 	for (cpu = 0; cpu < dequeue_limit; cpu++) {
467*b3b2431eSZqiang 		if (!cpu_possible(cpu))
468*b3b2431eSZqiang 			continue;
4694d1114c0SPaul E. McKenney 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
4704d1114c0SPaul E. McKenney 
4714d1114c0SPaul E. McKenney 		/* Advance and accelerate any new callbacks. */
472fd796e41SPaul E. McKenney 		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
4734d1114c0SPaul E. McKenney 			continue;
4744d1114c0SPaul E. McKenney 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
475fd796e41SPaul E. McKenney 		// Should we shrink down to a single callback queue?
476fd796e41SPaul E. McKenney 		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
477fd796e41SPaul E. McKenney 		if (n) {
478fd796e41SPaul E. McKenney 			ncbs += n;
479fd796e41SPaul E. McKenney 			if (cpu > 0)
480fd796e41SPaul E. McKenney 				ncbsnz += n;
481fd796e41SPaul E. McKenney 		}
4824d1114c0SPaul E. McKenney 		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
4834d1114c0SPaul E. McKenney 		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
484d119357dSPaul E. McKenney 		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
485d119357dSPaul E. McKenney 			if (rtp->lazy_jiffies)
486d119357dSPaul E. McKenney 				rtpcp->urgent_gp--;
4874d1114c0SPaul E. McKenney 			needgpcb |= 0x3;
488d119357dSPaul E. McKenney 		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
489d119357dSPaul E. McKenney 			rtpcp->urgent_gp = 0;
490d119357dSPaul E. McKenney 		}
491d119357dSPaul E. McKenney 		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
4924d1114c0SPaul E. McKenney 			needgpcb |= 0x1;
4934d1114c0SPaul E. McKenney 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
4944d1114c0SPaul E. McKenney 	}
495fd796e41SPaul E. McKenney 
496fd796e41SPaul E. McKenney 	// Shrink down to a single callback queue if appropriate.
497fd796e41SPaul E. McKenney 	// This is done in two stages: (1) If there are no more than
498fd796e41SPaul E. McKenney 	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
499fd796e41SPaul E. McKenney 	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
500fd796e41SPaul E. McKenney 	// if there has not been an increase in callbacks, limit dequeuing
501fd796e41SPaul E. McKenney 	// to CPU 0.  Note the matching RCU read-side critical section in
502fd796e41SPaul E. McKenney 	// call_rcu_tasks_generic().
503fd796e41SPaul E. McKenney 	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
504fd796e41SPaul E. McKenney 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
505fd796e41SPaul E. McKenney 		if (rtp->percpu_enqueue_lim > 1) {
506*b3b2431eSZqiang 			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
507fd796e41SPaul E. McKenney 			smp_store_release(&rtp->percpu_enqueue_lim, 1);
508fd796e41SPaul E. McKenney 			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
509a4fcfbeeSZqiang 			gpdone = false;
510fd796e41SPaul E. McKenney 			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
511fd796e41SPaul E. McKenney 		}
512fd796e41SPaul E. McKenney 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
513fd796e41SPaul E. McKenney 	}
514a4fcfbeeSZqiang 	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
515fd796e41SPaul E. McKenney 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
516fd796e41SPaul E. McKenney 		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
517fd796e41SPaul E. McKenney 			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
518fd796e41SPaul E. McKenney 			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
519fd796e41SPaul E. McKenney 		}
520a4fcfbeeSZqiang 		if (rtp->percpu_dequeue_lim == 1) {
521*b3b2431eSZqiang 			for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
522*b3b2431eSZqiang 				if (!cpu_possible(cpu))
523*b3b2431eSZqiang 					continue;
5244cf0585cSPaul E. McKenney 				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
5254cf0585cSPaul E. McKenney 
5264cf0585cSPaul E. McKenney 				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
5274cf0585cSPaul E. McKenney 			}
528a4fcfbeeSZqiang 		}
529fd796e41SPaul E. McKenney 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
530fd796e41SPaul E. McKenney 	}
531fd796e41SPaul E. McKenney 
5324d1114c0SPaul E. McKenney 	return needgpcb;
5334d1114c0SPaul E. McKenney }
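// Decoding the return value computed above: bit 0x1 means at least one queue
// has callbacks ready to invoke, and bit 0x2 means pending callbacks need a
// new grace period.  For example, a return value of 0x3 causes
// rcu_tasks_one_gp() to run a grace period and then invoke callbacks, while
// 0x1 skips straight to callback invocation.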
5344d1114c0SPaul E. McKenney 
53557881863SPaul E. McKenney // Advance callbacks and invoke any that are ready.
536d363f833SPaul E. McKenney static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
537eacd6f04SPaul E. McKenney {
538401b0de3SPaul E. McKenney 	int cpuwq;
5395873b8a9SPaul E. McKenney 	unsigned long flags;
5409b073de1SPaul E. McKenney 	int len;
541*b3b2431eSZqiang 	int index;
5429b073de1SPaul E. McKenney 	struct rcu_head *rhp;
543d363f833SPaul E. McKenney 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
544d363f833SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp_next;
5455873b8a9SPaul E. McKenney 
546*b3b2431eSZqiang 	index = rtpcp->index * 2 + 1;
547*b3b2431eSZqiang 	if (index < num_possible_cpus()) {
548*b3b2431eSZqiang 		rtpcp_next = rtp->rtpcp_array[index];
549*b3b2431eSZqiang 		if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
550*b3b2431eSZqiang 			cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
551401b0de3SPaul E. McKenney 			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
552*b3b2431eSZqiang 			index++;
553*b3b2431eSZqiang 			if (index < num_possible_cpus()) {
554*b3b2431eSZqiang 				rtpcp_next = rtp->rtpcp_array[index];
555*b3b2431eSZqiang 				if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
556*b3b2431eSZqiang 					cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
557401b0de3SPaul E. McKenney 					queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
558d363f833SPaul E. McKenney 				}
559d363f833SPaul E. McKenney 			}
560*b3b2431eSZqiang 		}
561*b3b2431eSZqiang 	}
5625873b8a9SPaul E. McKenney 
563*b3b2431eSZqiang 	if (rcu_segcblist_empty(&rtpcp->cblist))
564d363f833SPaul E. McKenney 		return;
565381a4f3bSPaul E. McKenney 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
5669b073de1SPaul E. McKenney 	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
5679b073de1SPaul E. McKenney 	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
568381a4f3bSPaul E. McKenney 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
5699b073de1SPaul E. McKenney 	len = rcl.len;
5709b073de1SPaul E. McKenney 	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
571e160de34SZhen Lei 		debug_rcu_head_callback(rhp);
5725873b8a9SPaul E. McKenney 		local_bh_disable();
5739b073de1SPaul E. McKenney 		rhp->func(rhp);
5745873b8a9SPaul E. McKenney 		local_bh_enable();
5755873b8a9SPaul E. McKenney 		cond_resched();
5765873b8a9SPaul E. McKenney 	}
577381a4f3bSPaul E. McKenney 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
5789b073de1SPaul E. McKenney 	rcu_segcblist_add_len(&rtpcp->cblist, -len);
5799b073de1SPaul E. McKenney 	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
580381a4f3bSPaul E. McKenney 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
58157881863SPaul E. McKenney }
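// The index arithmetic above fans callback invocation out as a binary tree:
// the rtpcp at index i queues work for the rtpcps at indices 2*i + 1 and
// 2*i + 2, each of which repeats the process from its own workqueue handler.
// For example, with eight queues in use, index 0 kicks 1 and 2, index 1
// kicks 3 and 4, and so on, so all queues are reached in O(log n) sequential
// steps rather than being invoked serially from one CPU.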
582d363f833SPaul E. McKenney 
583d363f833SPaul E. McKenney // Workqueue flood to advance callbacks and invoke any that are ready.
584d363f833SPaul E. McKenney static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
585d363f833SPaul E. McKenney {
586d363f833SPaul E. McKenney 	struct rcu_tasks *rtp;
587d363f833SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
588d363f833SPaul E. McKenney 
589d363f833SPaul E. McKenney 	rtp = rtpcp->rtpp;
590d363f833SPaul E. McKenney 	rcu_tasks_invoke_cbs(rtp, rtpcp);
59157881863SPaul E. McKenney }
59257881863SPaul E. McKenney 
593d96225fdSPaul E. McKenney // Wait for one grace period.
5944a8cc433SPaul E. McKenney static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
59557881863SPaul E. McKenney {
59657881863SPaul E. McKenney 	int needgpcb;
59757881863SPaul E. McKenney 
598d96225fdSPaul E. McKenney 	mutex_lock(&rtp->tasks_gp_mutex);
59957881863SPaul E. McKenney 
600d96225fdSPaul E. McKenney 	// If mid-boot, a grace period is needed unconditionally; otherwise, wait for callbacks to arrive.
6014a8cc433SPaul E. McKenney 	if (unlikely(midboot)) {
6024a8cc433SPaul E. McKenney 		needgpcb = 0x2;
6034a8cc433SPaul E. McKenney 	} else {
6049d0cce2bSPaul E. McKenney 		mutex_unlock(&rtp->tasks_gp_mutex);
6054a8cc433SPaul E. McKenney 		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
60688db792bSSebastian Andrzej Siewior 		rcuwait_wait_event(&rtp->cbs_wait,
60788db792bSSebastian Andrzej Siewior 				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
60888db792bSSebastian Andrzej Siewior 				   TASK_IDLE);
6099d0cce2bSPaul E. McKenney 		mutex_lock(&rtp->tasks_gp_mutex);
6104a8cc433SPaul E. McKenney 	}
61157881863SPaul E. McKenney 
61257881863SPaul E. McKenney 	if (needgpcb & 0x2) {
61357881863SPaul E. McKenney 		// Wait for one grace period.
61457881863SPaul E. McKenney 		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
61557881863SPaul E. McKenney 		rtp->gp_start = jiffies;
61657881863SPaul E. McKenney 		rcu_seq_start(&rtp->tasks_gp_seq);
61757881863SPaul E. McKenney 		rtp->gp_func(rtp);
61857881863SPaul E. McKenney 		rcu_seq_end(&rtp->tasks_gp_seq);
61957881863SPaul E. McKenney 	}
62057881863SPaul E. McKenney 
621d96225fdSPaul E. McKenney 	// Invoke callbacks.
62257881863SPaul E. McKenney 	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
623d363f833SPaul E. McKenney 	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
624d96225fdSPaul E. McKenney 	mutex_unlock(&rtp->tasks_gp_mutex);
625d96225fdSPaul E. McKenney }
62657881863SPaul E. McKenney 
627d96225fdSPaul E. McKenney // RCU-tasks kthread that detects grace periods and invokes callbacks.
628d96225fdSPaul E. McKenney static int __noreturn rcu_tasks_kthread(void *arg)
629d96225fdSPaul E. McKenney {
630d119357dSPaul E. McKenney 	int cpu;
631d96225fdSPaul E. McKenney 	struct rcu_tasks *rtp = arg;
632d96225fdSPaul E. McKenney 
633d119357dSPaul E. McKenney 	for_each_possible_cpu(cpu) {
634d119357dSPaul E. McKenney 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
635d119357dSPaul E. McKenney 
636d119357dSPaul E. McKenney 		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
637d119357dSPaul E. McKenney 		rtpcp->urgent_gp = 1;
638d119357dSPaul E. McKenney 	}
639d119357dSPaul E. McKenney 
640d96225fdSPaul E. McKenney 	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
641d96225fdSPaul E. McKenney 	housekeeping_affine(current, HK_TYPE_RCU);
642d119357dSPaul E. McKenney 	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
643d96225fdSPaul E. McKenney 
644d96225fdSPaul E. McKenney 	/*
645d96225fdSPaul E. McKenney 	 * Each pass through the following loop makes one check for
646d96225fdSPaul E. McKenney 	 * newly arrived callbacks, and, if there are some, waits for
647d96225fdSPaul E. McKenney 	 * one RCU-tasks grace period and then invokes the callbacks.
648d96225fdSPaul E. McKenney 	 * This loop is terminated by the system going down.  ;-)
649d96225fdSPaul E. McKenney 	 */
650d96225fdSPaul E. McKenney 	for (;;) {
651d96225fdSPaul E. McKenney 		// Wait for one grace period and invoke any callbacks
652d96225fdSPaul E. McKenney 		// that are ready.
6534a8cc433SPaul E. McKenney 		rcu_tasks_one_gp(rtp, false);
654d96225fdSPaul E. McKenney 
655d96225fdSPaul E. McKenney 		// Paranoid sleep to keep this from entering a tight loop.
6564fe192dfSPaul E. McKenney 		schedule_timeout_idle(rtp->gp_sleep);
6575873b8a9SPaul E. McKenney 	}
6585873b8a9SPaul E. McKenney }
6595873b8a9SPaul E. McKenney 
66068cb4720SPaul E. McKenney // Wait for a grace period for the specified flavor of Tasks RCU.
66168cb4720SPaul E. McKenney static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
66268cb4720SPaul E. McKenney {
66368cb4720SPaul E. McKenney 	/* Complain if the scheduler has not started.  */
664ea5c8987SZqiang 	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
665ea5c8987SZqiang 			 "synchronize_%s() called too soon", rtp->name))
666ea5c8987SZqiang 		return;
66768cb4720SPaul E. McKenney 
6684a8cc433SPaul E. McKenney 	// If the grace-period kthread is running, use it.
6694a8cc433SPaul E. McKenney 	if (READ_ONCE(rtp->kthread_ptr)) {
67068cb4720SPaul E. McKenney 		wait_rcu_gp(rtp->call_func);
6714a8cc433SPaul E. McKenney 		return;
6724a8cc433SPaul E. McKenney 	}
6734a8cc433SPaul E. McKenney 	rcu_tasks_one_gp(rtp, true);
67468cb4720SPaul E. McKenney }
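// Illustrative sketch of a flavor-specific synchronous wrapper (the classic
// flavor's synchronize_rcu_tasks() later in this file has this shape; shown
// here only as an example):
//
//	void synchronize_rcu_tasks(void)
//	{
//		synchronize_rcu_tasks_generic(&rcu_tasks);
//	}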
67568cb4720SPaul E. McKenney 
6761b04fa99SUladzislau Rezki (Sony) /* Spawn RCU-tasks grace-period kthread. */
6775873b8a9SPaul E. McKenney static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
6785873b8a9SPaul E. McKenney {
6795873b8a9SPaul E. McKenney 	struct task_struct *t;
6805873b8a9SPaul E. McKenney 
681c97d12a6SPaul E. McKenney 	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
682c97d12a6SPaul E. McKenney 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
6835873b8a9SPaul E. McKenney 		return;
6845873b8a9SPaul E. McKenney 	smp_mb(); /* Ensure others see full kthread. */
6855873b8a9SPaul E. McKenney }
6865873b8a9SPaul E. McKenney 
6875873b8a9SPaul E. McKenney #ifndef CONFIG_TINY_RCU
6885873b8a9SPaul E. McKenney 
6895873b8a9SPaul E. McKenney /*
6905873b8a9SPaul E. McKenney  * Print any non-default Tasks RCU settings.
6915873b8a9SPaul E. McKenney  */
6925873b8a9SPaul E. McKenney static void __init rcu_tasks_bootup_oddness(void)
6935873b8a9SPaul E. McKenney {
694d5f177d3SPaul E. McKenney #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
695f2539003SPaul E. McKenney 	int rtsimc;
696f2539003SPaul E. McKenney 
6975873b8a9SPaul E. McKenney 	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
6985873b8a9SPaul E. McKenney 		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
699f2539003SPaul E. McKenney 	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
700f2539003SPaul E. McKenney 	if (rtsimc != rcu_task_stall_info_mult) {
701f2539003SPaul E. McKenney 		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
702f2539003SPaul E. McKenney 		rcu_task_stall_info_mult = rtsimc;
703f2539003SPaul E. McKenney 	}
704d5f177d3SPaul E. McKenney #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
705d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
706d5f177d3SPaul E. McKenney 	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
7075873b8a9SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
708c84aad76SPaul E. McKenney #ifdef CONFIG_TASKS_RUDE_RCU
709c84aad76SPaul E. McKenney 	pr_info("\tRude variant of Tasks RCU enabled.\n");
710c84aad76SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
711d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_TRACE_RCU
712d5f177d3SPaul E. McKenney 	pr_info("\tTracing variant of Tasks RCU enabled.\n");
713d5f177d3SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
7145873b8a9SPaul E. McKenney }
7155873b8a9SPaul E. McKenney 
7165873b8a9SPaul E. McKenney #endif /* #ifndef CONFIG_TINY_RCU */
7175873b8a9SPaul E. McKenney 
7188344496eSPaul E. McKenney #ifndef CONFIG_TINY_RCU
719e21408ceSPaul E. McKenney /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
720e21408ceSPaul E. McKenney static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
721e21408ceSPaul E. McKenney {
72210b3742fSPaul E. McKenney 	int cpu;
72310b3742fSPaul E. McKenney 	bool havecbs = false;
724d119357dSPaul E. McKenney 	bool haveurgent = false;
725d119357dSPaul E. McKenney 	bool haveurgentcbs = false;
72610b3742fSPaul E. McKenney 
72710b3742fSPaul E. McKenney 	for_each_possible_cpu(cpu) {
72810b3742fSPaul E. McKenney 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
72910b3742fSPaul E. McKenney 
730d119357dSPaul E. McKenney 		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
73110b3742fSPaul E. McKenney 			havecbs = true;
732d119357dSPaul E. McKenney 		if (data_race(rtpcp->urgent_gp))
733d119357dSPaul E. McKenney 			haveurgent = true;
734d119357dSPaul E. McKenney 		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
735d119357dSPaul E. McKenney 			haveurgentcbs = true;
736d119357dSPaul E. McKenney 		if (havecbs && haveurgent && haveurgentcbs)
73710b3742fSPaul E. McKenney 			break;
73810b3742fSPaul E. McKenney 	}
739d119357dSPaul E. McKenney 	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
740e21408ceSPaul E. McKenney 		rtp->kname,
7417e0669c3SPaul E. McKenney 		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
742af051ca4SPaul E. McKenney 		jiffies - data_race(rtp->gp_jiffies),
743b14fb4fbSPaul E. McKenney 		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
7447e0669c3SPaul E. McKenney 		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
745e21408ceSPaul E. McKenney 		".k"[!!data_race(rtp->kthread_ptr)],
74610b3742fSPaul E. McKenney 		".C"[havecbs],
747d119357dSPaul E. McKenney 		".u"[haveurgent],
748d119357dSPaul E. McKenney 		".U"[haveurgentcbs],
749d119357dSPaul E. McKenney 		rtp->lazy_jiffies,
750e21408ceSPaul E. McKenney 		s);
751e21408ceSPaul E. McKenney }
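// A hypothetical line in the above format (all values made up purely for
// illustration) might read:
//
//	rcu_tasks: RTGS_WAIT_CBS(11) since 250 g:1284 i:0/12 kC.. l:250 <s>
//
// that is: flavor rcu_tasks, grace-period state RTGS_WAIT_CBS (11), last
// state change 250 jiffies ago, grace-period sequence 1284, 0 IPI failures
// out of 12 IPIs sent, kthread running ('k'), callbacks queued ('C'), no
// urgent grace periods, a lazy delay of 250 jiffies, and finally the
// caller-supplied suffix string s.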
75227c0f144SPaul E. McKenney #endif // #ifndef CONFIG_TINY_RCU
753e21408ceSPaul E. McKenney 
75425246fc8SPaul E. McKenney static void exit_tasks_rcu_finish_trace(struct task_struct *t);
75525246fc8SPaul E. McKenney 
75625246fc8SPaul E. McKenney #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
7575873b8a9SPaul E. McKenney 
7585873b8a9SPaul E. McKenney ////////////////////////////////////////////////////////////////////////
7595873b8a9SPaul E. McKenney //
760d01aa263SPaul E. McKenney // Shared code between task-list-scanning variants of Tasks RCU.
761d01aa263SPaul E. McKenney 
762d01aa263SPaul E. McKenney /* Wait for one RCU-tasks grace period. */
763d01aa263SPaul E. McKenney static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
764d01aa263SPaul E. McKenney {
765f2539003SPaul E. McKenney 	struct task_struct *g;
766d01aa263SPaul E. McKenney 	int fract;
767f2539003SPaul E. McKenney 	LIST_HEAD(holdouts);
768f2539003SPaul E. McKenney 	unsigned long j;
769f2539003SPaul E. McKenney 	unsigned long lastinfo;
770f2539003SPaul E. McKenney 	unsigned long lastreport;
771f2539003SPaul E. McKenney 	bool reported = false;
772f2539003SPaul E. McKenney 	int rtsi;
773f2539003SPaul E. McKenney 	struct task_struct *t;
774d01aa263SPaul E. McKenney 
775af051ca4SPaul E. McKenney 	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
7767460ade1SPaul E. McKenney 	rtp->pregp_func(&holdouts);
777d01aa263SPaul E. McKenney 
778d01aa263SPaul E. McKenney 	/*
779d01aa263SPaul E. McKenney 	 * There were callbacks, so we need to wait for an RCU-tasks
780d01aa263SPaul E. McKenney 	 * grace period.  Start off by scanning the task list for tasks
781d01aa263SPaul E. McKenney 	 * that are not already voluntarily blocked.  Mark these tasks
782d01aa263SPaul E. McKenney 	 * and make a list of them in holdouts.
783d01aa263SPaul E. McKenney 	 */
784af051ca4SPaul E. McKenney 	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
7851a4a8153SPaul E. McKenney 	if (rtp->pertask_func) {
786d01aa263SPaul E. McKenney 		rcu_read_lock();
787d01aa263SPaul E. McKenney 		for_each_process_thread(g, t)
788d01aa263SPaul E. McKenney 			rtp->pertask_func(t, &holdouts);
789d01aa263SPaul E. McKenney 		rcu_read_unlock();
7901a4a8153SPaul E. McKenney 	}
791d01aa263SPaul E. McKenney 
792af051ca4SPaul E. McKenney 	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
7939796e1aeSPaul E. McKenney 	rtp->postscan_func(&holdouts);
794d01aa263SPaul E. McKenney 
795d01aa263SPaul E. McKenney 	/*
796d01aa263SPaul E. McKenney 	 * Each pass through the following loop scans the list of holdout
797d01aa263SPaul E. McKenney 	 * tasks, removing any that are no longer holdouts.  When the list
798d01aa263SPaul E. McKenney 	 * is empty, we are done.
799d01aa263SPaul E. McKenney 	 */
800d01aa263SPaul E. McKenney 	lastreport = jiffies;
801f2539003SPaul E. McKenney 	lastinfo = lastreport;
802f2539003SPaul E. McKenney 	rtsi = READ_ONCE(rcu_task_stall_info);
803d01aa263SPaul E. McKenney 
8042393a613SPaul E. McKenney 	// Start off with initial wait and slowly back off to 1 HZ wait.
8052393a613SPaul E. McKenney 	fract = rtp->init_fract;
806d01aa263SPaul E. McKenney 
80777dc1741SPaul E. McKenney 	while (!list_empty(&holdouts)) {
808777570d9SSebastian Andrzej Siewior 		ktime_t exp;
809d01aa263SPaul E. McKenney 		bool firstreport;
810d01aa263SPaul E. McKenney 		bool needreport;
811d01aa263SPaul E. McKenney 		int rtst;
812d01aa263SPaul E. McKenney 
813f2539003SPaul E. McKenney 		// Slowly back off waiting for holdouts
814af051ca4SPaul E. McKenney 		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
815bddf7122SPaul E. McKenney 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
816bddf7122SPaul E. McKenney 			schedule_timeout_idle(fract);
817bddf7122SPaul E. McKenney 		} else {
818777570d9SSebastian Andrzej Siewior 			exp = jiffies_to_nsecs(fract);
819777570d9SSebastian Andrzej Siewior 			__set_current_state(TASK_IDLE);
820777570d9SSebastian Andrzej Siewior 			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
821bddf7122SPaul E. McKenney 		}
822d01aa263SPaul E. McKenney 
82375dc2da5SPaul E. McKenney 		if (fract < HZ)
82475dc2da5SPaul E. McKenney 			fract++;
825d01aa263SPaul E. McKenney 
826d01aa263SPaul E. McKenney 		rtst = READ_ONCE(rcu_task_stall_timeout);
827d01aa263SPaul E. McKenney 		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
828f2539003SPaul E. McKenney 		if (needreport) {
829d01aa263SPaul E. McKenney 			lastreport = jiffies;
830f2539003SPaul E. McKenney 			reported = true;
831f2539003SPaul E. McKenney 		}
832d01aa263SPaul E. McKenney 		firstreport = true;
833d01aa263SPaul E. McKenney 		WARN_ON(signal_pending(current));
834af051ca4SPaul E. McKenney 		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
835d01aa263SPaul E. McKenney 		rtp->holdouts_func(&holdouts, needreport, &firstreport);
836f2539003SPaul E. McKenney 
837f2539003SPaul E. McKenney 		// Print pre-stall informational messages if needed.
838f2539003SPaul E. McKenney 		j = jiffies;
839f2539003SPaul E. McKenney 		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
840f2539003SPaul E. McKenney 			lastinfo = j;
841f2539003SPaul E. McKenney 			rtsi = rtsi * rcu_task_stall_info_mult;
842df83fff7SPaul E. McKenney 			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
843f2539003SPaul E. McKenney 				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
844f2539003SPaul E. McKenney 		}
845d01aa263SPaul E. McKenney 	}
846d01aa263SPaul E. McKenney 
847af051ca4SPaul E. McKenney 	set_tasks_gp_state(rtp, RTGS_POST_GP);
848af051ca4SPaul E. McKenney 	rtp->postgp_func(rtp);
849d01aa263SPaul E. McKenney }
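// Timing sketch for the loop above, using the defaults defined earlier in
// this file: the per-pass sleep starts at rtp->init_fract jiffies and grows
// by one jiffy per pass up to HZ; informational messages begin after
// rcu_task_stall_info (10 seconds) and the interval between them then grows
// by a factor of rcu_task_stall_info_mult (3), i.e. 10s, 30s, 90s, ..., until
// a full stall report has been printed; full stall reports are emitted every
// rcu_task_stall_timeout (10 minutes) for as long as holdouts persist.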
850d01aa263SPaul E. McKenney 
85125246fc8SPaul E. McKenney #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
85225246fc8SPaul E. McKenney 
85325246fc8SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
85425246fc8SPaul E. McKenney 
855d01aa263SPaul E. McKenney ////////////////////////////////////////////////////////////////////////
856d01aa263SPaul E. McKenney //
8575873b8a9SPaul E. McKenney // Simple variant of RCU whose quiescent states are voluntary context
8588af9e2c7SPaul E. McKenney // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
8595873b8a9SPaul E. McKenney // As such, grace periods can take one good long time.  There are no
8605873b8a9SPaul E. McKenney // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
8615873b8a9SPaul E. McKenney // because this implementation is intended to get the system into a safe
8625873b8a9SPaul E. McKenney // state for some of the manipulations involved in tracing and the like.
8635873b8a9SPaul E. McKenney // Finally, this implementation does not support high call_rcu_tasks()
8645873b8a9SPaul E. McKenney // rates from multiple CPUs.  If this is required, per-CPU callback lists
8655873b8a9SPaul E. McKenney // will be needed.
86606a3ec92SPaul E. McKenney //
86706a3ec92SPaul E. McKenney // The implementation uses rcu_tasks_wait_gp(), which relies on function
86806a3ec92SPaul E. McKenney // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
86906a3ec92SPaul E. McKenney // function sets these function pointers up so that rcu_tasks_wait_gp()
87006a3ec92SPaul E. McKenney // invokes these functions in this order:
87106a3ec92SPaul E. McKenney //
87206a3ec92SPaul E. McKenney // rcu_tasks_pregp_step():
87306a3ec92SPaul E. McKenney //	Invokes synchronize_rcu() in order to wait for all in-flight
87406a3ec92SPaul E. McKenney //	t->on_rq and t->nvcsw transitions to complete.	This works because
87506a3ec92SPaul E. McKenney //	all such transitions are carried out with interrupts disabled.
87606a3ec92SPaul E. McKenney // rcu_tasks_pertask(), invoked on every non-idle task:
87706a3ec92SPaul E. McKenney //	For every runnable non-idle task other than the current one, use
87806a3ec92SPaul E. McKenney //	get_task_struct() to pin down that task, snapshot that task's
87906a3ec92SPaul E. McKenney //	number of voluntary context switches, and add that task to the
88006a3ec92SPaul E. McKenney //	holdout list.
88106a3ec92SPaul E. McKenney // rcu_tasks_postscan():
88206a3ec92SPaul E. McKenney //	Invoke synchronize_srcu() to ensure that all tasks that were
88306a3ec92SPaul E. McKenney //	in the process of exiting (and which thus might not know to
88406a3ec92SPaul E. McKenney //	synchronize with this RCU Tasks grace period) have completed
88506a3ec92SPaul E. McKenney //	exiting.
88606a3ec92SPaul E. McKenney // check_all_holdout_tasks(), repeatedly until holdout list is empty:
88706a3ec92SPaul E. McKenney //	Scans the holdout list, attempting to identify a quiescent state
88806a3ec92SPaul E. McKenney //	for each task on the list.  If there is a quiescent state, the
88906a3ec92SPaul E. McKenney //	corresponding task is removed from the holdout list.
89006a3ec92SPaul E. McKenney // rcu_tasks_postgp():
89106a3ec92SPaul E. McKenney //	Invokes synchronize_rcu() in order to ensure that all prior
89206a3ec92SPaul E. McKenney //	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
89306a3ec92SPaul E. McKenney //	to have happened before the end of this RCU Tasks grace period.
89406a3ec92SPaul E. McKenney //	Again, this works because all such transitions are carried out
89506a3ec92SPaul E. McKenney //	with interrupts disabled.
89606a3ec92SPaul E. McKenney //
89706a3ec92SPaul E. McKenney // For each exiting task, the exit_tasks_rcu_start() and
89806a3ec92SPaul E. McKenney // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
89906a3ec92SPaul E. McKenney // read-side critical sections waited for by rcu_tasks_postscan().
90006a3ec92SPaul E. McKenney //
901381a4f3bSPaul E. McKenney // Pre-grace-period update-side code is ordered before the grace
902381a4f3bSPaul E. McKenney // period via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
903381a4f3bSPaul E. McKenney // is ordered before the grace period via the synchronize_rcu() call in
904381a4f3bSPaul E. McKenney // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
90506a3ec92SPaul E. McKenney // disabling.
906eacd6f04SPaul E. McKenney 
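// The following is an illustrative, grossly simplified sketch (added for
// orientation only; it is not part of the original file) of the order in
// which rcu_tasks_wait_gp() invokes the function pointers described above.
// Stall warnings, sleeps between holdout scans, and the needreport
// computation are omitted; see rcu_tasks_wait_gp() for the real logic.
static void __maybe_unused rcu_tasks_wait_gp_sketch(struct rcu_tasks *rtp)
{
	LIST_HEAD(holdouts);
	bool firstreport = true;
	struct task_struct *g, *t;

	rtp->pregp_func(&holdouts);			// rcu_tasks_pregp_step()
	if (rtp->pertask_func) {
		rcu_read_lock();
		for_each_process_thread(g, t)
			rtp->pertask_func(t, &holdouts);	// rcu_tasks_pertask()
		rcu_read_unlock();
	}
	rtp->postscan_func(&holdouts);			// rcu_tasks_postscan()
	while (!list_empty(&holdouts))
		rtp->holdouts_func(&holdouts, true, &firstreport); // check_all_holdout_tasks()
	rtp->postgp_func(rtp);				// rcu_tasks_postgp()
}
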
907e4fe5dd6SPaul E. McKenney /* Pre-grace-period preparation. */
rcu_tasks_pregp_step(struct list_head * hop)9087460ade1SPaul E. McKenney static void rcu_tasks_pregp_step(struct list_head *hop)
909e4fe5dd6SPaul E. McKenney {
910e4fe5dd6SPaul E. McKenney 	/*
911e4fe5dd6SPaul E. McKenney 	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
912e4fe5dd6SPaul E. McKenney 	 * to complete.  Invoking synchronize_rcu() suffices because all
913e4fe5dd6SPaul E. McKenney 	 * these transitions occur with interrupts disabled.  Without this
914e4fe5dd6SPaul E. McKenney 	 * synchronize_rcu(), a read-side critical section that started
915e4fe5dd6SPaul E. McKenney 	 * before the grace period might be incorrectly seen as having
916e4fe5dd6SPaul E. McKenney 	 * started after the grace period.
917e4fe5dd6SPaul E. McKenney 	 *
918e4fe5dd6SPaul E. McKenney 	 * This synchronize_rcu() also dispenses with the need for a
919e4fe5dd6SPaul E. McKenney 	 * memory barrier on the first store to t->rcu_tasks_holdout,
920e4fe5dd6SPaul E. McKenney 	 * as it forces the store to happen after the beginning of the
921e4fe5dd6SPaul E. McKenney 	 * grace period.
922e4fe5dd6SPaul E. McKenney 	 */
923e4fe5dd6SPaul E. McKenney 	synchronize_rcu();
924e4fe5dd6SPaul E. McKenney }
925e4fe5dd6SPaul E. McKenney 
926b3ffc116SFrederic Weisbecker /* Check for quiescent states since the pregp's synchronize_rcu() */
rcu_tasks_is_holdout(struct task_struct * t)927b3ffc116SFrederic Weisbecker static bool rcu_tasks_is_holdout(struct task_struct *t)
928b3ffc116SFrederic Weisbecker {
929b3ffc116SFrederic Weisbecker 	int cpu;
930b3ffc116SFrederic Weisbecker 
931b3ffc116SFrederic Weisbecker 	/* Has the task been seen voluntarily sleeping? */
932b3ffc116SFrederic Weisbecker 	if (!READ_ONCE(t->on_rq))
933b3ffc116SFrederic Weisbecker 		return false;
934b3ffc116SFrederic Weisbecker 
935b3ffc116SFrederic Weisbecker 	/*
936b3ffc116SFrederic Weisbecker 	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
937b3ffc116SFrederic Weisbecker 	 * quiescent states. But CPU boot code performed by the idle task
938b3ffc116SFrederic Weisbecker 	 * isn't a quiescent state.
939b3ffc116SFrederic Weisbecker 	 */
940b3ffc116SFrederic Weisbecker 	if (is_idle_task(t))
941b3ffc116SFrederic Weisbecker 		return false;
942b3ffc116SFrederic Weisbecker 
943b3ffc116SFrederic Weisbecker 	cpu = task_cpu(t);
944b3ffc116SFrederic Weisbecker 
945b3ffc116SFrederic Weisbecker 	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
946b3ffc116SFrederic Weisbecker 	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
947b3ffc116SFrederic Weisbecker 		return false;
948b3ffc116SFrederic Weisbecker 
949b3ffc116SFrederic Weisbecker 	return true;
950b3ffc116SFrederic Weisbecker }
951b3ffc116SFrederic Weisbecker 
952e4fe5dd6SPaul E. McKenney /* Per-task initial processing. */
rcu_tasks_pertask(struct task_struct * t,struct list_head * hop)953e4fe5dd6SPaul E. McKenney static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
954e4fe5dd6SPaul E. McKenney {
955b3ffc116SFrederic Weisbecker 	if (t != current && rcu_tasks_is_holdout(t)) {
956e4fe5dd6SPaul E. McKenney 		get_task_struct(t);
957e4fe5dd6SPaul E. McKenney 		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
958e4fe5dd6SPaul E. McKenney 		WRITE_ONCE(t->rcu_tasks_holdout, true);
959e4fe5dd6SPaul E. McKenney 		list_add(&t->rcu_tasks_holdout_list, hop);
960e4fe5dd6SPaul E. McKenney 	}
961e4fe5dd6SPaul E. McKenney }
962e4fe5dd6SPaul E. McKenney 
963e4fe5dd6SPaul E. McKenney /* Processing between scanning the tasklist and draining the holdout list. */
rcu_tasks_postscan(struct list_head * hop)96404a3c5aaSPaul E. McKenney static void rcu_tasks_postscan(struct list_head *hop)
965e4fe5dd6SPaul E. McKenney {
966a4533cc0SNeeraj Upadhyay 	int rtsi = READ_ONCE(rcu_task_stall_info);
967a4533cc0SNeeraj Upadhyay 
968a4533cc0SNeeraj Upadhyay 	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
969a4533cc0SNeeraj Upadhyay 		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
970a4533cc0SNeeraj Upadhyay 		add_timer(&tasks_rcu_exit_srcu_stall_timer);
971a4533cc0SNeeraj Upadhyay 	}
972a4533cc0SNeeraj Upadhyay 
973e4fe5dd6SPaul E. McKenney 	/*
974e4e1e808SFrederic Weisbecker 	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
975e4e1e808SFrederic Weisbecker 	 * until their final schedule() with TASK_DEAD state. To cope with
976e4e1e808SFrederic Weisbecker 	 * this, divide the fragile exit path part in two intersecting
977e4e1e808SFrederic Weisbecker 	 * read side critical sections:
978e4e1e808SFrederic Weisbecker 	 *
979e4e1e808SFrederic Weisbecker 	 * 1) An _SRCU_ read side starting before calling exit_notify(),
980e4e1e808SFrederic Weisbecker 	 *    which may remove the task from the tasklist, and ending after
981e4e1e808SFrederic Weisbecker 	 *    the final preempt_disable() call in do_exit().
982e4e1e808SFrederic Weisbecker 	 *
983e4e1e808SFrederic Weisbecker 	 * 2) An _RCU_ read side starting with the final preempt_disable()
984e4e1e808SFrederic Weisbecker 	 *    call in do_exit() and ending with the final call to schedule()
985e4e1e808SFrederic Weisbecker 	 *    with TASK_DEAD state.
986e4e1e808SFrederic Weisbecker 	 *
987e4e1e808SFrederic Weisbecker 	 * This handles the part 1). And postgp will handle part 2) with a
988e4e1e808SFrederic Weisbecker 	 * call to synchronize_rcu().
989e4fe5dd6SPaul E. McKenney 	 */
990e4fe5dd6SPaul E. McKenney 	synchronize_srcu(&tasks_rcu_exit_srcu);
991a4533cc0SNeeraj Upadhyay 
992a4533cc0SNeeraj Upadhyay 	if (!IS_ENABLED(CONFIG_TINY_RCU))
993a4533cc0SNeeraj Upadhyay 		del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
994e4fe5dd6SPaul E. McKenney }
995e4fe5dd6SPaul E. McKenney 
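/*
 * Purely illustrative schematic (added for clarity; this is not the
 * actual do_exit() code, whose hooks live in kernel/exit.c) of the two
 * intersecting read-side critical sections described in
 * rcu_tasks_postscan() above.
 */
static void __maybe_unused rcu_tasks_exit_path_schematic(void)
{
	exit_tasks_rcu_start();		// Part 1: SRCU read side begins.

	// ... exit_notify() may remove the task from the tasklist ...

	// Final preempt_disable() in do_exit(): part 2 (plain RCU) begins,
	// and is waited for by rcu_tasks_postgp()'s synchronize_rcu().

	exit_tasks_rcu_finish();	// Part 1: SRCU read side ends.

	// ... final schedule() with TASK_DEAD state: part 2 ends ...
}
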
996eacd6f04SPaul E. McKenney /* See if tasks are still holding out, complain if so. */
check_holdout_task(struct task_struct * t,bool needreport,bool * firstreport)997eacd6f04SPaul E. McKenney static void check_holdout_task(struct task_struct *t,
998eacd6f04SPaul E. McKenney 			       bool needreport, bool *firstreport)
999eacd6f04SPaul E. McKenney {
1000eacd6f04SPaul E. McKenney 	int cpu;
1001eacd6f04SPaul E. McKenney 
1002eacd6f04SPaul E. McKenney 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
1003eacd6f04SPaul E. McKenney 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
1004b3ffc116SFrederic Weisbecker 	    !rcu_tasks_is_holdout(t) ||
1005eacd6f04SPaul E. McKenney 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
1006eacd6f04SPaul E. McKenney 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
1007eacd6f04SPaul E. McKenney 		WRITE_ONCE(t->rcu_tasks_holdout, false);
1008eacd6f04SPaul E. McKenney 		list_del_init(&t->rcu_tasks_holdout_list);
1009eacd6f04SPaul E. McKenney 		put_task_struct(t);
1010eacd6f04SPaul E. McKenney 		return;
1011eacd6f04SPaul E. McKenney 	}
1012eacd6f04SPaul E. McKenney 	rcu_request_urgent_qs_task(t);
1013eacd6f04SPaul E. McKenney 	if (!needreport)
1014eacd6f04SPaul E. McKenney 		return;
1015eacd6f04SPaul E. McKenney 	if (*firstreport) {
1016eacd6f04SPaul E. McKenney 		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
1017eacd6f04SPaul E. McKenney 		*firstreport = false;
1018eacd6f04SPaul E. McKenney 	}
1019eacd6f04SPaul E. McKenney 	cpu = task_cpu(t);
1020eacd6f04SPaul E. McKenney 	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
1021eacd6f04SPaul E. McKenney 		 t, ".I"[is_idle_task(t)],
1022eacd6f04SPaul E. McKenney 		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
1023eacd6f04SPaul E. McKenney 		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
1024eacd6f04SPaul E. McKenney 		 t->rcu_tasks_idle_cpu, cpu);
1025eacd6f04SPaul E. McKenney 	sched_show_task(t);
1026eacd6f04SPaul E. McKenney }
1027eacd6f04SPaul E. McKenney 
1028e4fe5dd6SPaul E. McKenney /* Scan the holdout lists for tasks no longer holding out. */
check_all_holdout_tasks(struct list_head * hop,bool needreport,bool * firstreport)1029e4fe5dd6SPaul E. McKenney static void check_all_holdout_tasks(struct list_head *hop,
1030e4fe5dd6SPaul E. McKenney 				    bool needreport, bool *firstreport)
1031eacd6f04SPaul E. McKenney {
1032e4fe5dd6SPaul E. McKenney 	struct task_struct *t, *t1;
1033eacd6f04SPaul E. McKenney 
1034e4fe5dd6SPaul E. McKenney 	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1035e4fe5dd6SPaul E. McKenney 		check_holdout_task(t, needreport, firstreport);
1036eacd6f04SPaul E. McKenney 		cond_resched();
1037eacd6f04SPaul E. McKenney 	}
1038eacd6f04SPaul E. McKenney }
1039eacd6f04SPaul E. McKenney 
1040e4fe5dd6SPaul E. McKenney /* Finish off the Tasks-RCU grace period. */
rcu_tasks_postgp(struct rcu_tasks * rtp)1041af051ca4SPaul E. McKenney static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1042e4fe5dd6SPaul E. McKenney {
1043eacd6f04SPaul E. McKenney 	/*
10445873b8a9SPaul E. McKenney 	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
10455873b8a9SPaul E. McKenney 	 * memory barriers prior to them in the schedule() path, memory
10465873b8a9SPaul E. McKenney 	 * reordering on other CPUs could cause their RCU-tasks read-side
10475873b8a9SPaul E. McKenney 	 * critical sections to extend past the end of the grace period.
10485873b8a9SPaul E. McKenney 	 * However, because these ->nvcsw updates are carried out with
10495873b8a9SPaul E. McKenney 	 * interrupts disabled, we can use synchronize_rcu() to force the
10505873b8a9SPaul E. McKenney 	 * needed ordering on all such CPUs.
1051eacd6f04SPaul E. McKenney 	 *
10525873b8a9SPaul E. McKenney 	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
10535873b8a9SPaul E. McKenney 	 * accesses to be within the grace period, avoiding the need for
10545873b8a9SPaul E. McKenney 	 * memory barriers for ->rcu_tasks_holdout accesses.
1055eacd6f04SPaul E. McKenney 	 *
10565873b8a9SPaul E. McKenney 	 * In addition, this synchronize_rcu() waits for exiting tasks
10575873b8a9SPaul E. McKenney 	 * to complete their final preempt_disable() region of execution,
1058e4e1e808SFrederic Weisbecker 	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
1059e4e1e808SFrederic Weisbecker 	 * enforcing that the whole region from just before tasklist removal
1060e4e1e808SFrederic Weisbecker 	 * until the final schedule() with TASK_DEAD state behaves as an
1061e4e1e808SFrederic Weisbecker 	 * RCU Tasks read-side critical section.
1062eacd6f04SPaul E. McKenney 	 */
1063eacd6f04SPaul E. McKenney 	synchronize_rcu();
1064eacd6f04SPaul E. McKenney }
1065eacd6f04SPaul E. McKenney 
10665873b8a9SPaul E. McKenney void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
1067c97d12a6SPaul E. McKenney DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
10685873b8a9SPaul E. McKenney 
tasks_rcu_exit_srcu_stall(struct timer_list * unused)1069a4533cc0SNeeraj Upadhyay static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1070a4533cc0SNeeraj Upadhyay {
1071a4533cc0SNeeraj Upadhyay #ifndef CONFIG_TINY_RCU
1072a4533cc0SNeeraj Upadhyay 	int rtsi;
1073a4533cc0SNeeraj Upadhyay 
1074a4533cc0SNeeraj Upadhyay 	rtsi = READ_ONCE(rcu_task_stall_info);
1075a4533cc0SNeeraj Upadhyay 	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1076a4533cc0SNeeraj Upadhyay 		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1077a4533cc0SNeeraj Upadhyay 		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1078a4533cc0SNeeraj Upadhyay 	pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1079a4533cc0SNeeraj Upadhyay 	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1080a4533cc0SNeeraj Upadhyay 	add_timer(&tasks_rcu_exit_srcu_stall_timer);
1081a4533cc0SNeeraj Upadhyay #endif // #ifndef CONFIG_TINY_RCU
1082a4533cc0SNeeraj Upadhyay }
1083a4533cc0SNeeraj Upadhyay 
10845873b8a9SPaul E. McKenney /**
10855873b8a9SPaul E. McKenney  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
10865873b8a9SPaul E. McKenney  * @rhp: structure to be used for queueing the RCU updates.
10875873b8a9SPaul E. McKenney  * @func: actual callback function to be invoked after the grace period
10885873b8a9SPaul E. McKenney  *
10895873b8a9SPaul E. McKenney  * The callback function will be invoked some time after a full grace
10905873b8a9SPaul E. McKenney  * period elapses, in other words after all currently executing RCU
10915873b8a9SPaul E. McKenney  * read-side critical sections have completed. call_rcu_tasks() assumes
10925873b8a9SPaul E. McKenney  * that the read-side critical sections end at a voluntary context
10938af9e2c7SPaul E. McKenney  * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
10945873b8a9SPaul E. McKenney  * or transition to usermode execution.  As such, there are no read-side
10955873b8a9SPaul E. McKenney  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
10965873b8a9SPaul E. McKenney  * this primitive is intended to determine that all tasks have passed
1097a616aec9SIngo Molnar  * through a safe state, not so much for data-structure synchronization.
10985873b8a9SPaul E. McKenney  *
10995873b8a9SPaul E. McKenney  * See the description of call_rcu() for more detailed information on
11005873b8a9SPaul E. McKenney  * memory ordering guarantees.
11015873b8a9SPaul E. McKenney  */
call_rcu_tasks(struct rcu_head * rhp,rcu_callback_t func)11025873b8a9SPaul E. McKenney void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
11035873b8a9SPaul E. McKenney {
11045873b8a9SPaul E. McKenney 	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
11055873b8a9SPaul E. McKenney }
11065873b8a9SPaul E. McKenney EXPORT_SYMBOL_GPL(call_rcu_tasks);
11075873b8a9SPaul E. McKenney 
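/*
 * Hypothetical usage sketch (added for illustration; the structure and
 * function names below are not part of the kernel).  A tracer that has
 * unhooked a dynamically allocated trampoline from all call sites can
 * queue its freeing with call_rcu_tasks().  Assumes <linux/slab.h> is
 * visible for kfree().
 */
struct example_tramp {
	struct rcu_head rh;
	/* ... dynamically generated instructions would live here ... */
};

static void example_tramp_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_tramp, rh));
}

static void __maybe_unused example_tramp_retire(struct example_tramp *tramp)
{
	// Caller has already ensured that no new execution enters @tramp.
	call_rcu_tasks(&tramp->rh, example_tramp_free_cb);
}
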
11085873b8a9SPaul E. McKenney /**
11095873b8a9SPaul E. McKenney  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
11105873b8a9SPaul E. McKenney  *
11115873b8a9SPaul E. McKenney  * Control will return to the caller some time after a full rcu-tasks
11125873b8a9SPaul E. McKenney  * grace period has elapsed, in other words after all currently
11135873b8a9SPaul E. McKenney  * executing rcu-tasks read-side critical sections have elapsed.  These
11145873b8a9SPaul E. McKenney  * read-side critical sections are delimited by calls to schedule(),
11155873b8a9SPaul E. McKenney  * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
11165873b8a9SPaul E. McKenney  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
11175873b8a9SPaul E. McKenney  *
11185873b8a9SPaul E. McKenney  * This is a very specialized primitive, intended only for a few uses in
11195873b8a9SPaul E. McKenney  * tracing and other situations requiring manipulation of function
11205873b8a9SPaul E. McKenney  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
11215873b8a9SPaul E. McKenney  * is not (yet) intended for heavy use from multiple CPUs.
11225873b8a9SPaul E. McKenney  *
11235873b8a9SPaul E. McKenney  * See the description of synchronize_rcu() for more detailed information
11245873b8a9SPaul E. McKenney  * on memory ordering guarantees.
11255873b8a9SPaul E. McKenney  */
synchronize_rcu_tasks(void)11265873b8a9SPaul E. McKenney void synchronize_rcu_tasks(void)
11275873b8a9SPaul E. McKenney {
11285873b8a9SPaul E. McKenney 	synchronize_rcu_tasks_generic(&rcu_tasks);
11295873b8a9SPaul E. McKenney }
11305873b8a9SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
11315873b8a9SPaul E. McKenney 
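/*
 * Hypothetical usage sketch (illustrative only, not part of the kernel):
 * the synchronous counterpart of the call_rcu_tasks() sketch above.
 * Once synchronize_rcu_tasks() returns, no task can still be executing
 * within the old trampoline, so its memory may be reused or freed (again
 * assuming <linux/slab.h> for kfree()).
 */
static void __maybe_unused example_tramp_remove_sync(void *old_tramp)
{
	// Step 1 (tracer-specific): unhook @old_tramp so that no new
	// executions can enter it.

	// Step 2: wait for tasks already executing within it to leave.
	synchronize_rcu_tasks();

	// Step 3: no task can now be running in @old_tramp; free it.
	kfree(old_tramp);
}
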
11325873b8a9SPaul E. McKenney /**
11335873b8a9SPaul E. McKenney  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
11345873b8a9SPaul E. McKenney  *
11355873b8a9SPaul E. McKenney  * Although the current implementation is guaranteed to wait, it is not
11365873b8a9SPaul E. McKenney  * obligated to, for example, if there are no pending callbacks.
11375873b8a9SPaul E. McKenney  */
rcu_barrier_tasks(void)11385873b8a9SPaul E. McKenney void rcu_barrier_tasks(void)
11395873b8a9SPaul E. McKenney {
1140ce9b1c66SPaul E. McKenney 	rcu_barrier_tasks_generic(&rcu_tasks);
11415873b8a9SPaul E. McKenney }
11425873b8a9SPaul E. McKenney EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
11435873b8a9SPaul E. McKenney 
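/*
 * Hypothetical teardown sketch (illustrative only): a module that has
 * queued callbacks via call_rcu_tasks() must wait for them to be invoked
 * before its code and data are freed, which is exactly what
 * rcu_barrier_tasks() provides.
 */
static void __maybe_unused example_module_teardown(void)
{
	// Step 1 (module-specific): stop queueing new call_rcu_tasks()
	// callbacks.

	// Step 2: wait until all already-queued callbacks have been invoked.
	rcu_barrier_tasks();
}
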
1144450d461aSPaul E. McKenney int rcu_tasks_lazy_ms = -1;
1145450d461aSPaul E. McKenney module_param(rcu_tasks_lazy_ms, int, 0444);
1146450d461aSPaul E. McKenney 
rcu_spawn_tasks_kthread(void)1147eacd6f04SPaul E. McKenney static int __init rcu_spawn_tasks_kthread(void)
1148eacd6f04SPaul E. McKenney {
1149cafafd67SPaul E. McKenney 	cblist_init_generic(&rcu_tasks);
11504fe192dfSPaul E. McKenney 	rcu_tasks.gp_sleep = HZ / 10;
115175dc2da5SPaul E. McKenney 	rcu_tasks.init_fract = HZ / 10;
1152450d461aSPaul E. McKenney 	if (rcu_tasks_lazy_ms >= 0)
1153450d461aSPaul E. McKenney 		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1154e4fe5dd6SPaul E. McKenney 	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1155e4fe5dd6SPaul E. McKenney 	rcu_tasks.pertask_func = rcu_tasks_pertask;
1156e4fe5dd6SPaul E. McKenney 	rcu_tasks.postscan_func = rcu_tasks_postscan;
1157e4fe5dd6SPaul E. McKenney 	rcu_tasks.holdouts_func = check_all_holdout_tasks;
1158e4fe5dd6SPaul E. McKenney 	rcu_tasks.postgp_func = rcu_tasks_postgp;
11595873b8a9SPaul E. McKenney 	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1160eacd6f04SPaul E. McKenney 	return 0;
1161eacd6f04SPaul E. McKenney }
1162eacd6f04SPaul E. McKenney 
116327c0f144SPaul E. McKenney #if !defined(CONFIG_TINY_RCU)
show_rcu_tasks_classic_gp_kthread(void)116427c0f144SPaul E. McKenney void show_rcu_tasks_classic_gp_kthread(void)
1165e21408ceSPaul E. McKenney {
1166e21408ceSPaul E. McKenney 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1167e21408ceSPaul E. McKenney }
116827c0f144SPaul E. McKenney EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
116927c0f144SPaul E. McKenney #endif // !defined(CONFIG_TINY_RCU)
1170e21408ceSPaul E. McKenney 
get_rcu_tasks_gp_kthread(void)1171271a8467SPaul E. McKenney struct task_struct *get_rcu_tasks_gp_kthread(void)
1172271a8467SPaul E. McKenney {
1173271a8467SPaul E. McKenney 	return rcu_tasks.kthread_ptr;
1174271a8467SPaul E. McKenney }
1175271a8467SPaul E. McKenney EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1176271a8467SPaul E. McKenney 
1177e4e1e808SFrederic Weisbecker /*
1178e4e1e808SFrederic Weisbecker  * Contribute to protect against tasklist scan blind spot while the
1179e4e1e808SFrederic Weisbecker  * task is exiting and may be removed from the tasklist. See
1180e4e1e808SFrederic Weisbecker  * corresponding synchronize_srcu() for further details.
1181e4e1e808SFrederic Weisbecker  */
exit_tasks_rcu_start(void)118225246fc8SPaul E. McKenney void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
118325246fc8SPaul E. McKenney {
118425246fc8SPaul E. McKenney 	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
118525246fc8SPaul E. McKenney }
118625246fc8SPaul E. McKenney 
1187e4e1e808SFrederic Weisbecker /*
1188e4e1e808SFrederic Weisbecker  * Contribute to protect against tasklist scan blind spot while the
1189e4e1e808SFrederic Weisbecker  * task is exiting and may be removed from the tasklist. See
1190e4e1e808SFrederic Weisbecker  * corresponding synchronize_srcu() for further details.
1191e4e1e808SFrederic Weisbecker  */
exit_tasks_rcu_stop(void)119228319d6dSFrederic Weisbecker void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
119325246fc8SPaul E. McKenney {
119425246fc8SPaul E. McKenney 	struct task_struct *t = current;
119525246fc8SPaul E. McKenney 
119625246fc8SPaul E. McKenney 	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
119728319d6dSFrederic Weisbecker }
119828319d6dSFrederic Weisbecker 
119928319d6dSFrederic Weisbecker /*
120028319d6dSFrederic Weisbecker  * Contribute to protect against tasklist scan blind spot while the
120128319d6dSFrederic Weisbecker  * task is exiting and may be removed from the tasklist. See
120228319d6dSFrederic Weisbecker  * corresponding synchronize_srcu() for further details.
120328319d6dSFrederic Weisbecker  */
exit_tasks_rcu_finish(void)120428319d6dSFrederic Weisbecker void exit_tasks_rcu_finish(void)
120528319d6dSFrederic Weisbecker {
120628319d6dSFrederic Weisbecker 	exit_tasks_rcu_stop();
120728319d6dSFrederic Weisbecker 	exit_tasks_rcu_finish_trace(current);
120825246fc8SPaul E. McKenney }
120925246fc8SPaul E. McKenney 
1210e21408ceSPaul E. McKenney #else /* #ifdef CONFIG_TASKS_RCU */
exit_tasks_rcu_start(void)121125246fc8SPaul E. McKenney void exit_tasks_rcu_start(void) { }
exit_tasks_rcu_stop(void)121228319d6dSFrederic Weisbecker void exit_tasks_rcu_stop(void) { }
exit_tasks_rcu_finish(void)121325246fc8SPaul E. McKenney void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1214e21408ceSPaul E. McKenney #endif /* #else #ifdef CONFIG_TASKS_RCU */
1215c84aad76SPaul E. McKenney 
1216c84aad76SPaul E. McKenney #ifdef CONFIG_TASKS_RUDE_RCU
1217c84aad76SPaul E. McKenney 
1218c84aad76SPaul E. McKenney ////////////////////////////////////////////////////////////////////////
1219c84aad76SPaul E. McKenney //
1220c84aad76SPaul E. McKenney // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1221c84aad76SPaul E. McKenney // passing an empty function to schedule_on_each_cpu().  This approach
1222e4be1f44SPaul E. McKenney // provides an asynchronous call_rcu_tasks_rude() API and batching of
1223e4be1f44SPaul E. McKenney // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
12249fc98e31SPaul E. McKenney // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
12259fc98e31SPaul E. McKenney // and induces otherwise unnecessary context switches on all online CPUs,
12269fc98e31SPaul E. McKenney // whether idle or not.
12279fc98e31SPaul E. McKenney //
12289fc98e31SPaul E. McKenney // Callback handling is provided by the rcu_tasks_kthread() function.
12299fc98e31SPaul E. McKenney //
12309fc98e31SPaul E. McKenney // Ordering is provided by the scheduler's context-switch code.
1231c84aad76SPaul E. McKenney 
1232c84aad76SPaul E. McKenney // Empty function to allow workqueues to force a context switch.
rcu_tasks_be_rude(struct work_struct * work)1233c84aad76SPaul E. McKenney static void rcu_tasks_be_rude(struct work_struct *work)
1234c84aad76SPaul E. McKenney {
1235c84aad76SPaul E. McKenney }
1236c84aad76SPaul E. McKenney 
1237c84aad76SPaul E. McKenney // Wait for one rude RCU-tasks grace period.
rcu_tasks_rude_wait_gp(struct rcu_tasks * rtp)1238c84aad76SPaul E. McKenney static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1239c84aad76SPaul E. McKenney {
1240238dbce3SPaul E. McKenney 	rtp->n_ipis += cpumask_weight(cpu_online_mask);
1241c84aad76SPaul E. McKenney 	schedule_on_each_cpu(rcu_tasks_be_rude);
1242c84aad76SPaul E. McKenney }
1243c84aad76SPaul E. McKenney 
1244c84aad76SPaul E. McKenney void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1245c97d12a6SPaul E. McKenney DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1246c97d12a6SPaul E. McKenney 		 "RCU Tasks Rude");
1247c84aad76SPaul E. McKenney 
1248c84aad76SPaul E. McKenney /**
1249c84aad76SPaul E. McKenney  * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1250c84aad76SPaul E. McKenney  * @rhp: structure to be used for queueing the RCU updates.
1251c84aad76SPaul E. McKenney  * @func: actual callback function to be invoked after the grace period
1252c84aad76SPaul E. McKenney  *
1253c84aad76SPaul E. McKenney  * The callback function will be invoked some time after a full grace
1254c84aad76SPaul E. McKenney  * period elapses, in other words after all currently executing RCU
1255c84aad76SPaul E. McKenney  * read-side critical sections have completed. call_rcu_tasks_rude()
1256c84aad76SPaul E. McKenney  * assumes that the read-side critical sections end at context switch,
12578af9e2c7SPaul E. McKenney  * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1258a6517e9cSNeeraj Upadhyay  * usermode execution is schedulable). As such, there are no read-side
1259a6517e9cSNeeraj Upadhyay  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1260a6517e9cSNeeraj Upadhyay  * this primitive is intended to determine that all tasks have passed
1261a6517e9cSNeeraj Upadhyay  * through a safe state, not so much for data-structure synchronization.
1262c84aad76SPaul E. McKenney  *
1263c84aad76SPaul E. McKenney  * See the description of call_rcu() for more detailed information on
1264c84aad76SPaul E. McKenney  * memory ordering guarantees.
1265c84aad76SPaul E. McKenney  */
call_rcu_tasks_rude(struct rcu_head * rhp,rcu_callback_t func)1266c84aad76SPaul E. McKenney void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1267c84aad76SPaul E. McKenney {
1268c84aad76SPaul E. McKenney 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1269c84aad76SPaul E. McKenney }
1270c84aad76SPaul E. McKenney EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1271c84aad76SPaul E. McKenney 
1272c84aad76SPaul E. McKenney /**
1273c84aad76SPaul E. McKenney  * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1274c84aad76SPaul E. McKenney  *
1275c84aad76SPaul E. McKenney  * Control will return to the caller some time after a rude rcu-tasks
1276c84aad76SPaul E. McKenney  * grace period has elapsed, in other words after all currently
1277c84aad76SPaul E. McKenney  * executing rcu-tasks read-side critical sections have elapsed.  These
1278c84aad76SPaul E. McKenney  * read-side critical sections are delimited by calls to schedule(),
1279a6517e9cSNeeraj Upadhyay  * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1280a6517e9cSNeeraj Upadhyay  * context), and (in theory, anyway) cond_resched().
1281c84aad76SPaul E. McKenney  *
1282c84aad76SPaul E. McKenney  * This is a very specialized primitive, intended only for a few uses in
1283c84aad76SPaul E. McKenney  * tracing and other situations requiring manipulation of function preambles
1284c84aad76SPaul E. McKenney  * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
1285c84aad76SPaul E. McKenney  * (yet) intended for heavy use from multiple CPUs.
1286c84aad76SPaul E. McKenney  *
1287c84aad76SPaul E. McKenney  * See the description of synchronize_rcu() for more detailed information
1288c84aad76SPaul E. McKenney  * on memory ordering guarantees.
1289c84aad76SPaul E. McKenney  */
synchronize_rcu_tasks_rude(void)1290c84aad76SPaul E. McKenney void synchronize_rcu_tasks_rude(void)
1291c84aad76SPaul E. McKenney {
1292c84aad76SPaul E. McKenney 	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1293c84aad76SPaul E. McKenney }
1294c84aad76SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1295c84aad76SPaul E. McKenney 
1296c84aad76SPaul E. McKenney /**
1297c84aad76SPaul E. McKenney  * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1298c84aad76SPaul E. McKenney  *
1299c84aad76SPaul E. McKenney  * Although the current implementation is guaranteed to wait, it is not
1300c84aad76SPaul E. McKenney  * obligated to, for example, if there are no pending callbacks.
1301c84aad76SPaul E. McKenney  */
rcu_barrier_tasks_rude(void)1302c84aad76SPaul E. McKenney void rcu_barrier_tasks_rude(void)
1303c84aad76SPaul E. McKenney {
1304ce9b1c66SPaul E. McKenney 	rcu_barrier_tasks_generic(&rcu_tasks_rude);
1305c84aad76SPaul E. McKenney }
1306c84aad76SPaul E. McKenney EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1307c84aad76SPaul E. McKenney 
1308450d461aSPaul E. McKenney int rcu_tasks_rude_lazy_ms = -1;
1309450d461aSPaul E. McKenney module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1310450d461aSPaul E. McKenney 
rcu_spawn_tasks_rude_kthread(void)1311c84aad76SPaul E. McKenney static int __init rcu_spawn_tasks_rude_kthread(void)
1312c84aad76SPaul E. McKenney {
1313cafafd67SPaul E. McKenney 	cblist_init_generic(&rcu_tasks_rude);
13144fe192dfSPaul E. McKenney 	rcu_tasks_rude.gp_sleep = HZ / 10;
1315450d461aSPaul E. McKenney 	if (rcu_tasks_rude_lazy_ms >= 0)
1316450d461aSPaul E. McKenney 		rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
1317c84aad76SPaul E. McKenney 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1318c84aad76SPaul E. McKenney 	return 0;
1319c84aad76SPaul E. McKenney }
1320c84aad76SPaul E. McKenney 
132127c0f144SPaul E. McKenney #if !defined(CONFIG_TINY_RCU)
show_rcu_tasks_rude_gp_kthread(void)132227c0f144SPaul E. McKenney void show_rcu_tasks_rude_gp_kthread(void)
1323e21408ceSPaul E. McKenney {
1324e21408ceSPaul E. McKenney 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1325e21408ceSPaul E. McKenney }
132627c0f144SPaul E. McKenney EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
132727c0f144SPaul E. McKenney #endif // !defined(CONFIG_TINY_RCU)
1328a15ec57cSPaul E. McKenney 
get_rcu_tasks_rude_gp_kthread(void)1329a15ec57cSPaul E. McKenney struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1330a15ec57cSPaul E. McKenney {
1331a15ec57cSPaul E. McKenney 	return rcu_tasks_rude.kthread_ptr;
1332a15ec57cSPaul E. McKenney }
1333a15ec57cSPaul E. McKenney EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1334a15ec57cSPaul E. McKenney 
133527c0f144SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1336d5f177d3SPaul E. McKenney 
1337d5f177d3SPaul E. McKenney ////////////////////////////////////////////////////////////////////////
1338d5f177d3SPaul E. McKenney //
1339d5f177d3SPaul E. McKenney // Tracing variant of Tasks RCU.  This variant is designed to be used
1340d5f177d3SPaul E. McKenney // to protect tracing hooks, including those of BPF.  This variant
1341d5f177d3SPaul E. McKenney // therefore:
1342d5f177d3SPaul E. McKenney //
1343d5f177d3SPaul E. McKenney // 1.	Has explicit read-side markers to allow finite grace periods
1344d5f177d3SPaul E. McKenney //	in the face of in-kernel loops for PREEMPT=n builds.
1345d5f177d3SPaul E. McKenney //
1346d5f177d3SPaul E. McKenney // 2.	Protects code in the idle loop, exception entry/exit, and
1347d5f177d3SPaul E. McKenney //	CPU-hotplug code paths, similar to the capabilities of SRCU.
1348d5f177d3SPaul E. McKenney //
1349c4f113acSPaul E. McKenney // 3.	Avoids expensive read-side instructions, having overhead similar
1350d5f177d3SPaul E. McKenney //	to that of Preemptible RCU.
1351d5f177d3SPaul E. McKenney //
1352eea3423bSPaul E. McKenney // There are of course downsides.  For example, the grace-period code
1353eea3423bSPaul E. McKenney // can send IPIs to CPUs, even when those CPUs are in the idle loop or
1354eea3423bSPaul E. McKenney // in nohz_full userspace.  If needed, these downsides can be at least
1355eea3423bSPaul E. McKenney // partially remedied.
1356d5f177d3SPaul E. McKenney //
1357d5f177d3SPaul E. McKenney // Perhaps most important, this variant of RCU does not affect the vanilla
1358d5f177d3SPaul E. McKenney // flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
1359d5f177d3SPaul E. McKenney // readers can operate from idle, offline, and exception entry/exit in no
1360d5f177d3SPaul E. McKenney // way allows rcu_preempt and rcu_sched readers to also do so.
1361a434dd10SPaul E. McKenney //
1362a434dd10SPaul E. McKenney // The implementation uses rcu_tasks_wait_gp(), which relies on function
1363a434dd10SPaul E. McKenney // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
1364a434dd10SPaul E. McKenney // function sets these function pointers up so that rcu_tasks_wait_gp()
1365a434dd10SPaul E. McKenney // invokes these functions in this order:
1366a434dd10SPaul E. McKenney //
1367a434dd10SPaul E. McKenney // rcu_tasks_trace_pregp_step():
1368eea3423bSPaul E. McKenney //	Disables CPU hotplug, adds all currently executing tasks to the
1369eea3423bSPaul E. McKenney //	holdout list, then checks the state of all tasks that blocked
1370eea3423bSPaul E. McKenney //	or were preempted within their current RCU Tasks Trace read-side
1371eea3423bSPaul E. McKenney //	critical section, adding them to the holdout list if appropriate.
1372eea3423bSPaul E. McKenney //	Finally, this function re-enables CPU hotplug.
1373eea3423bSPaul E. McKenney // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1374a434dd10SPaul E. McKenney // rcu_tasks_trace_postscan():
1375eea3423bSPaul E. McKenney //	Invokes synchronize_rcu() to wait for late-stage exiting tasks
1376eea3423bSPaul E. McKenney //	to finish exiting.
1377a434dd10SPaul E. McKenney // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1378a434dd10SPaul E. McKenney //	Scans the holdout list, attempting to identify a quiescent state
1379a434dd10SPaul E. McKenney //	for each task on the list.  If there is a quiescent state, the
1380eea3423bSPaul E. McKenney //	corresponding task is removed from the holdout list.  Once this
1381eea3423bSPaul E. McKenney //	list is empty, the grace period has completed.
1382a434dd10SPaul E. McKenney // rcu_tasks_trace_postgp():
1383eea3423bSPaul E. McKenney //	Provides the needed full memory barrier and does debug checks.
1384a434dd10SPaul E. McKenney //
1385a434dd10SPaul E. McKenney // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1386a434dd10SPaul E. McKenney //
1387eea3423bSPaul E. McKenney // Pre-grace-period update-side code is ordered before the grace period
1388eea3423bSPaul E. McKenney // via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
1389eea3423bSPaul E. McKenney // read-side code is ordered before the grace period by atomic operations
1390eea3423bSPaul E. McKenney // on .b.need_qs flag of each task involved in this process, or by scheduler
1391eea3423bSPaul E. McKenney // context-switch ordering (for locked-down non-running readers).
1392d5f177d3SPaul E. McKenney 
1393d5f177d3SPaul E. McKenney // The lockdep state must be outside of #ifdef to be useful.
1394d5f177d3SPaul E. McKenney #ifdef CONFIG_DEBUG_LOCK_ALLOC
1395d5f177d3SPaul E. McKenney static struct lock_class_key rcu_lock_trace_key;
1396d5f177d3SPaul E. McKenney struct lockdep_map rcu_trace_lock_map =
1397d5f177d3SPaul E. McKenney 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1398d5f177d3SPaul E. McKenney EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1399d5f177d3SPaul E. McKenney #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1400d5f177d3SPaul E. McKenney 
1401d5f177d3SPaul E. McKenney #ifdef CONFIG_TASKS_TRACE_RCU
1402d5f177d3SPaul E. McKenney 
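/*
 * Hypothetical reader-side sketch (illustrative only; the structure and
 * pointer names are not part of the kernel).  As noted in the overview
 * above, RCU Tasks Trace, unlike RCU Tasks, has explicit read-side
 * markers, rcu_read_lock_trace() and rcu_read_unlock_trace(), declared
 * in <linux/rcupdate_trace.h>, which this sketch assumes is visible here.
 */
struct example_traced_data {
	int value;
};

static struct example_traced_data __rcu *example_traced_ptr;

static int __maybe_unused example_trace_reader(void)
{
	int ret = -1;
	struct example_traced_data *p;

	rcu_read_lock_trace();
	p = rcu_dereference_raw(example_traced_ptr);	// trace-hook-style access
	if (p)
		ret = p->value;
	rcu_read_unlock_trace();
	return ret;
}
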
1403d5f177d3SPaul E. McKenney // Record outstanding IPIs to each CPU.  No point in sending two...
1404d5f177d3SPaul E. McKenney static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1405d5f177d3SPaul E. McKenney 
140640471509SPaul E. McKenney // The number of detections of task quiescent state relying on
140740471509SPaul E. McKenney // heavyweight readers executing explicit memory barriers.
14086731da9eSPaul E. McKenney static unsigned long n_heavy_reader_attempts;
14096731da9eSPaul E. McKenney static unsigned long n_heavy_reader_updates;
14106731da9eSPaul E. McKenney static unsigned long n_heavy_reader_ofl_updates;
1411ffcc21a3SPaul E. McKenney static unsigned long n_trc_holdouts;
141240471509SPaul E. McKenney 
1413b0afa0f0SPaul E. McKenney void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1414b0afa0f0SPaul E. McKenney DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1415b0afa0f0SPaul E. McKenney 		 "RCU Tasks Trace");
1416b0afa0f0SPaul E. McKenney 
14173847b645SPaul E. McKenney /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
rcu_ld_need_qs(struct task_struct * t)14183847b645SPaul E. McKenney static u8 rcu_ld_need_qs(struct task_struct *t)
14193847b645SPaul E. McKenney {
14203847b645SPaul E. McKenney 	smp_mb(); // Enforce full grace-period ordering.
14213847b645SPaul E. McKenney 	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
14223847b645SPaul E. McKenney }
14233847b645SPaul E. McKenney 
14243847b645SPaul E. McKenney /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
rcu_st_need_qs(struct task_struct * t,u8 v)14253847b645SPaul E. McKenney static void rcu_st_need_qs(struct task_struct *t, u8 v)
14263847b645SPaul E. McKenney {
14273847b645SPaul E. McKenney 	smp_store_release(&t->trc_reader_special.b.need_qs, v);
14283847b645SPaul E. McKenney 	smp_mb(); // Enforce full grace-period ordering.
14293847b645SPaul E. McKenney }
14303847b645SPaul E. McKenney 
14313847b645SPaul E. McKenney /*
14323847b645SPaul E. McKenney  * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
14333847b645SPaul E. McKenney  * the four-byte operand-size restriction of some platforms.
14343847b645SPaul E. McKenney  * Returns the old value, which is often ignored.
14353847b645SPaul E. McKenney  */
rcu_trc_cmpxchg_need_qs(struct task_struct * t,u8 old,u8 new)14363847b645SPaul E. McKenney u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
14373847b645SPaul E. McKenney {
14383847b645SPaul E. McKenney 	union rcu_special ret;
14393847b645SPaul E. McKenney 	union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
14403847b645SPaul E. McKenney 	union rcu_special trs_new = trs_old;
14413847b645SPaul E. McKenney 
14423847b645SPaul E. McKenney 	if (trs_old.b.need_qs != old)
14433847b645SPaul E. McKenney 		return trs_old.b.need_qs;
14443847b645SPaul E. McKenney 	trs_new.b.need_qs = new;
14453847b645SPaul E. McKenney 	ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
14463847b645SPaul E. McKenney 	return ret.b.need_qs;
14473847b645SPaul E. McKenney }
14483847b645SPaul E. McKenney EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
14493847b645SPaul E. McKenney 
1450eea3423bSPaul E. McKenney /*
1451eea3423bSPaul E. McKenney  * If we are the last reader, signal the grace-period kthread.
1452eea3423bSPaul E. McKenney  * Also remove from the per-CPU list of blocked tasks.
1453eea3423bSPaul E. McKenney  */
rcu_read_unlock_trace_special(struct task_struct * t)1454a5c071ccSPaul E. McKenney void rcu_read_unlock_trace_special(struct task_struct *t)
1455d5f177d3SPaul E. McKenney {
14560bcb3868SPaul E. McKenney 	unsigned long flags;
14570bcb3868SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp;
14580bcb3868SPaul E. McKenney 	union rcu_special trs;
14590bcb3868SPaul E. McKenney 
14600bcb3868SPaul E. McKenney 	// Open-coded full-word version of rcu_ld_need_qs().
14610bcb3868SPaul E. McKenney 	smp_mb(); // Enforce full grace-period ordering.
14620bcb3868SPaul E. McKenney 	trs = smp_load_acquire(&t->trc_reader_special);
1463276c4104SPaul E. McKenney 
14643847b645SPaul E. McKenney 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1465276c4104SPaul E. McKenney 		smp_mb(); // Pairs with update-side barriers.
1466276c4104SPaul E. McKenney 	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
14670bcb3868SPaul E. McKenney 	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
14683847b645SPaul E. McKenney 		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
14693847b645SPaul E. McKenney 						       TRC_NEED_QS_CHECKED);
14703847b645SPaul E. McKenney 
14710bcb3868SPaul E. McKenney 		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
14720bcb3868SPaul E. McKenney 	}
14730bcb3868SPaul E. McKenney 	if (trs.b.blocked) {
14740bcb3868SPaul E. McKenney 		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
14750bcb3868SPaul E. McKenney 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
14760bcb3868SPaul E. McKenney 		list_del_init(&t->trc_blkd_node);
14770bcb3868SPaul E. McKenney 		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
14780bcb3868SPaul E. McKenney 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
14793847b645SPaul E. McKenney 	}
1480a5c071ccSPaul E. McKenney 	WRITE_ONCE(t->trc_reader_nesting, 0);
1481d5f177d3SPaul E. McKenney }
1482d5f177d3SPaul E. McKenney EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1483d5f177d3SPaul E. McKenney 
14840356d4e6SPaul E. McKenney /* Add a newly blocked reader task to its CPU's list. */
rcu_tasks_trace_qs_blkd(struct task_struct * t)14850356d4e6SPaul E. McKenney void rcu_tasks_trace_qs_blkd(struct task_struct *t)
14860356d4e6SPaul E. McKenney {
14870356d4e6SPaul E. McKenney 	unsigned long flags;
14880356d4e6SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp;
14890356d4e6SPaul E. McKenney 
14900356d4e6SPaul E. McKenney 	local_irq_save(flags);
14910356d4e6SPaul E. McKenney 	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
14920356d4e6SPaul E. McKenney 	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
14930356d4e6SPaul E. McKenney 	t->trc_blkd_cpu = smp_processor_id();
14940356d4e6SPaul E. McKenney 	if (!rtpcp->rtp_blkd_tasks.next)
14950356d4e6SPaul E. McKenney 		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
14960356d4e6SPaul E. McKenney 	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
14970bcb3868SPaul E. McKenney 	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
14980356d4e6SPaul E. McKenney 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
14990356d4e6SPaul E. McKenney }
15000356d4e6SPaul E. McKenney EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
15010356d4e6SPaul E. McKenney 
1502d5f177d3SPaul E. McKenney /* Add a task to the holdout list, if it is not already on the list. */
trc_add_holdout(struct task_struct * t,struct list_head * bhp)1503d5f177d3SPaul E. McKenney static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1504d5f177d3SPaul E. McKenney {
1505d5f177d3SPaul E. McKenney 	if (list_empty(&t->trc_holdout_list)) {
1506d5f177d3SPaul E. McKenney 		get_task_struct(t);
1507d5f177d3SPaul E. McKenney 		list_add(&t->trc_holdout_list, bhp);
1508ffcc21a3SPaul E. McKenney 		n_trc_holdouts++;
1509d5f177d3SPaul E. McKenney 	}
1510d5f177d3SPaul E. McKenney }
1511d5f177d3SPaul E. McKenney 
1512d5f177d3SPaul E. McKenney /* Remove a task from the holdout list, if it is in fact present. */
trc_del_holdout(struct task_struct * t)1513d5f177d3SPaul E. McKenney static void trc_del_holdout(struct task_struct *t)
1514d5f177d3SPaul E. McKenney {
1515d5f177d3SPaul E. McKenney 	if (!list_empty(&t->trc_holdout_list)) {
1516d5f177d3SPaul E. McKenney 		list_del_init(&t->trc_holdout_list);
1517d5f177d3SPaul E. McKenney 		put_task_struct(t);
1518ffcc21a3SPaul E. McKenney 		n_trc_holdouts--;
1519d5f177d3SPaul E. McKenney 	}
1520d5f177d3SPaul E. McKenney }
1521d5f177d3SPaul E. McKenney 
1522d5f177d3SPaul E. McKenney /* IPI handler to check task state. */
trc_read_check_handler(void * t_in)1523d5f177d3SPaul E. McKenney static void trc_read_check_handler(void *t_in)
1524d5f177d3SPaul E. McKenney {
15259ff86b4cSPaul E. McKenney 	int nesting;
1526d5f177d3SPaul E. McKenney 	struct task_struct *t = current;
1527d5f177d3SPaul E. McKenney 	struct task_struct *texp = t_in;
1528d5f177d3SPaul E. McKenney 
1529d5f177d3SPaul E. McKenney 	// If the task is no longer running on this CPU, leave.
15303847b645SPaul E. McKenney 	if (unlikely(texp != t))
1531d5f177d3SPaul E. McKenney 		goto reset_ipi; // Already on holdout list, so will check later.
1532d5f177d3SPaul E. McKenney 
1533d5f177d3SPaul E. McKenney 	// If the task is not in a read-side critical section, and
1534d5f177d3SPaul E. McKenney 	// if this is the last reader, awaken the grace-period kthread.
15359ff86b4cSPaul E. McKenney 	nesting = READ_ONCE(t->trc_reader_nesting);
15369ff86b4cSPaul E. McKenney 	if (likely(!nesting)) {
15373847b645SPaul E. McKenney 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1538d5f177d3SPaul E. McKenney 		goto reset_ipi;
1539d5f177d3SPaul E. McKenney 	}
1540ba3a86e4SPaul E. McKenney 	// If we are racing with an rcu_read_unlock_trace(), try again later.
15419ff86b4cSPaul E. McKenney 	if (unlikely(nesting < 0))
1542ba3a86e4SPaul E. McKenney 		goto reset_ipi;
1543d5f177d3SPaul E. McKenney 
1544eea3423bSPaul E. McKenney 	// Get here if the task is in a read-side critical section.
1545eea3423bSPaul E. McKenney 	// Set its state so that it will update state for the grace-period
1546eea3423bSPaul E. McKenney 	// kthread upon exit from that critical section.
154755061126SPaul E. McKenney 	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1548d5f177d3SPaul E. McKenney 
1549d5f177d3SPaul E. McKenney reset_ipi:
1550d5f177d3SPaul E. McKenney 	// Allow future IPIs to be sent on CPU and for task.
1551d5f177d3SPaul E. McKenney 	// Also order this IPI handler against any later manipulations of
1552d5f177d3SPaul E. McKenney 	// the intended task.
15538211e922SLiu Song 	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1554d5f177d3SPaul E. McKenney 	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1555d5f177d3SPaul E. McKenney }
1556d5f177d3SPaul E. McKenney 
1557d5f177d3SPaul E. McKenney /* Callback function for scheduler to check locked-down task.  */
trc_inspect_reader(struct task_struct * t,void * bhp_in)15583847b645SPaul E. McKenney static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1559d5f177d3SPaul E. McKenney {
15603847b645SPaul E. McKenney 	struct list_head *bhp = bhp_in;
15617d0c9c50SPaul E. McKenney 	int cpu = task_cpu(t);
156218f08e75SPaul E. McKenney 	int nesting;
15637e3b70e0SPaul E. McKenney 	bool ofl = cpu_is_offline(cpu);
15647d0c9c50SPaul E. McKenney 
1565897ba84dSPaul E. McKenney 	if (task_curr(t) && !ofl) {
15667d0c9c50SPaul E. McKenney 		// If no chance of heavyweight readers, do it the hard way.
1567897ba84dSPaul E. McKenney 		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
15689b3c4ab3SPeter Zijlstra 			return -EINVAL;
15697d0c9c50SPaul E. McKenney 
15707d0c9c50SPaul E. McKenney 		// If heavyweight readers are enabled on the remote task,
15717d0c9c50SPaul E. McKenney 		// we can inspect its state even though it is currently running.
15727d0c9c50SPaul E. McKenney 		// However, we cannot safely change its state.
157340471509SPaul E. McKenney 		n_heavy_reader_attempts++;
1574897ba84dSPaul E. McKenney 		// Check for "running" idle tasks on offline CPUs.
1575897ba84dSPaul E. McKenney 		if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
15769b3c4ab3SPeter Zijlstra 			return -EINVAL; // No quiescent state, do it the hard way.
157740471509SPaul E. McKenney 		n_heavy_reader_updates++;
157818f08e75SPaul E. McKenney 		nesting = 0;
15797d0c9c50SPaul E. McKenney 	} else {
1580bdb0cca0SPaul E. McKenney 		// The task is not running, so C-language access is safe.
158118f08e75SPaul E. McKenney 		nesting = t->trc_reader_nesting;
15825d1e4e5fSFrederic Weisbecker 		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1583897ba84dSPaul E. McKenney 		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1584897ba84dSPaul E. McKenney 			n_heavy_reader_ofl_updates++;
15857d0c9c50SPaul E. McKenney 	}
1586d5f177d3SPaul E. McKenney 
158718f08e75SPaul E. McKenney 	// If not exiting a read-side critical section, mark as checked
158818f08e75SPaul E. McKenney 	// so that the grace-period kthread will remove it from the
158918f08e75SPaul E. McKenney 	// holdout list.
15900968e892SPaul E. McKenney 	if (!nesting) {
15913847b645SPaul E. McKenney 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
15920968e892SPaul E. McKenney 		return 0;  // In QS, so done.
15933847b645SPaul E. McKenney 	}
15940968e892SPaul E. McKenney 	if (nesting < 0)
1595eea3423bSPaul E. McKenney 		return -EINVAL; // Reader transitioning, try again later.
15967d0c9c50SPaul E. McKenney 
15977d0c9c50SPaul E. McKenney 	// The task is in a read-side critical section, so set up its
15980968e892SPaul E. McKenney 	// state so that it will update state upon exit from that critical
15990968e892SPaul E. McKenney 	// section.
160055061126SPaul E. McKenney 	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
16013847b645SPaul E. McKenney 		trc_add_holdout(t, bhp);
16029b3c4ab3SPeter Zijlstra 	return 0;
1603d5f177d3SPaul E. McKenney }
1604d5f177d3SPaul E. McKenney 
1605d5f177d3SPaul E. McKenney /* Attempt to extract the state for the specified task. */
trc_wait_for_one_reader(struct task_struct * t,struct list_head * bhp)1606d5f177d3SPaul E. McKenney static void trc_wait_for_one_reader(struct task_struct *t,
1607d5f177d3SPaul E. McKenney 				    struct list_head *bhp)
1608d5f177d3SPaul E. McKenney {
1609d5f177d3SPaul E. McKenney 	int cpu;
1610d5f177d3SPaul E. McKenney 
1611d5f177d3SPaul E. McKenney 	// If a previous IPI is still in flight, let it complete.
1612d5f177d3SPaul E. McKenney 	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1613d5f177d3SPaul E. McKenney 		return;
1614d5f177d3SPaul E. McKenney 
1615d5f177d3SPaul E. McKenney 	// The current task had better be in a quiescent state.
1616d5f177d3SPaul E. McKenney 	if (t == current) {
16173847b645SPaul E. McKenney 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1618bdb0cca0SPaul E. McKenney 		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1619d5f177d3SPaul E. McKenney 		return;
1620d5f177d3SPaul E. McKenney 	}
1621d5f177d3SPaul E. McKenney 
1622d5f177d3SPaul E. McKenney 	// Attempt to nail down the task for inspection.
1623d5f177d3SPaul E. McKenney 	get_task_struct(t);
16243847b645SPaul E. McKenney 	if (!task_call_func(t, trc_inspect_reader, bhp)) {
1625d5f177d3SPaul E. McKenney 		put_task_struct(t);
1626d5f177d3SPaul E. McKenney 		return;
1627d5f177d3SPaul E. McKenney 	}
1628d5f177d3SPaul E. McKenney 	put_task_struct(t);
1629d5f177d3SPaul E. McKenney 
163045f4b4a2SPaul E. McKenney 	// If this task is not yet on the holdout list, then we are in
163145f4b4a2SPaul E. McKenney 	// an RCU read-side critical section.  Otherwise, the invocation of
1632d0a85858SNeeraj Upadhyay 	// trc_add_holdout() that added it to the list did the necessary
163345f4b4a2SPaul E. McKenney 	// get_task_struct().  Either way, the task cannot be freed out
163445f4b4a2SPaul E. McKenney 	// from under this code.
163545f4b4a2SPaul E. McKenney 
1636d5f177d3SPaul E. McKenney 	// If currently running, send an IPI; either way, add to the list.
1637d5f177d3SPaul E. McKenney 	trc_add_holdout(t, bhp);
1638574de876SPaul E. McKenney 	if (task_curr(t) &&
1639574de876SPaul E. McKenney 	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1640d5f177d3SPaul E. McKenney 		// The task is currently running, so try IPIing it.
1641d5f177d3SPaul E. McKenney 		cpu = task_cpu(t);
1642d5f177d3SPaul E. McKenney 
1643d5f177d3SPaul E. McKenney 		// If there is already an IPI outstanding, let it happen.
1644d5f177d3SPaul E. McKenney 		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1645d5f177d3SPaul E. McKenney 			return;
1646d5f177d3SPaul E. McKenney 
1647d5f177d3SPaul E. McKenney 		per_cpu(trc_ipi_to_cpu, cpu) = true;
1648d5f177d3SPaul E. McKenney 		t->trc_ipi_to_cpu = cpu;
1649238dbce3SPaul E. McKenney 		rcu_tasks_trace.n_ipis++;
165096017bf9SPaul E. McKenney 		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1651d5f177d3SPaul E. McKenney 			// Just in case there is some other reason for
1652d5f177d3SPaul E. McKenney 			// failure than the target CPU being offline.
165346aa886cSNeeraj Upadhyay 			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
165446aa886cSNeeraj Upadhyay 				  __func__, cpu);
16557e0669c3SPaul E. McKenney 			rcu_tasks_trace.n_ipis_fails++;
1656d5f177d3SPaul E. McKenney 			per_cpu(trc_ipi_to_cpu, cpu) = false;
165746aa886cSNeeraj Upadhyay 			t->trc_ipi_to_cpu = -1;
1658d5f177d3SPaul E. McKenney 		}
1659d5f177d3SPaul E. McKenney 	}
1660d5f177d3SPaul E. McKenney }
1661d5f177d3SPaul E. McKenney 
16627460ade1SPaul E. McKenney /*
16637460ade1SPaul E. McKenney  * Initialize for first-round processing for the specified task.
16647460ade1SPaul E. McKenney  * Return false if task is NULL or already taken care of, true otherwise.
16657460ade1SPaul E. McKenney  */
rcu_tasks_trace_pertask_prep(struct task_struct * t,bool notself)16667460ade1SPaul E. McKenney static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1667d5f177d3SPaul E. McKenney {
16681b04fa99SUladzislau Rezki (Sony) 	// During early boot when there is only the one boot CPU, there
16695d4c90d7SPaul E. McKenney 	// is no idle task for the other CPUs.	Also, the grace-period
167019415004SPaul E. McKenney 	// kthread is always in a quiescent state.  In addition, just return
167119415004SPaul E. McKenney 	// if this task is already on the list.
16727460ade1SPaul E. McKenney 	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
16737460ade1SPaul E. McKenney 		return false;
16741b04fa99SUladzislau Rezki (Sony) 
16753847b645SPaul E. McKenney 	rcu_st_need_qs(t, 0);
1676d5f177d3SPaul E. McKenney 	t->trc_ipi_to_cpu = -1;
16777460ade1SPaul E. McKenney 	return true;
16787460ade1SPaul E. McKenney }
16797460ade1SPaul E. McKenney 
16807460ade1SPaul E. McKenney /* Do first-round processing for the specified task. */
rcu_tasks_trace_pertask(struct task_struct * t,struct list_head * hop)16817460ade1SPaul E. McKenney static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
16827460ade1SPaul E. McKenney {
16837460ade1SPaul E. McKenney 	if (rcu_tasks_trace_pertask_prep(t, true))
1684d5f177d3SPaul E. McKenney 		trc_wait_for_one_reader(t, hop);
1685d5f177d3SPaul E. McKenney }
1686d5f177d3SPaul E. McKenney 
16871fa98e2eSPaul E. McKenney /* Initialize for a new RCU-tasks-trace grace period. */
rcu_tasks_trace_pregp_step(struct list_head * hop)16887460ade1SPaul E. McKenney static void rcu_tasks_trace_pregp_step(struct list_head *hop)
16891fa98e2eSPaul E. McKenney {
1690dc7d54b4SPaul E. McKenney 	LIST_HEAD(blkd_tasks);
16911fa98e2eSPaul E. McKenney 	int cpu;
1692dc7d54b4SPaul E. McKenney 	unsigned long flags;
1693dc7d54b4SPaul E. McKenney 	struct rcu_tasks_percpu *rtpcp;
1694dc7d54b4SPaul E. McKenney 	struct task_struct *t;
16951fa98e2eSPaul E. McKenney 
16961fa98e2eSPaul E. McKenney 	// There shouldn't be any old IPIs, but...
16971fa98e2eSPaul E. McKenney 	for_each_possible_cpu(cpu)
16981fa98e2eSPaul E. McKenney 		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
16991fa98e2eSPaul E. McKenney 
1700eea3423bSPaul E. McKenney 	// Disable CPU hotplug across the CPU scan for the benefit of
1701eea3423bSPaul E. McKenney 	// any IPIs that might be needed.  This also waits for all readers
1702eea3423bSPaul E. McKenney 	// in CPU-hotplug code paths.
17031fa98e2eSPaul E. McKenney 	cpus_read_lock();
17047460ade1SPaul E. McKenney 
1705eea3423bSPaul E. McKenney 	// These rcu_tasks_trace_pertask_prep() calls are serialized to
17067460ade1SPaul E. McKenney 	// allow safe access to the hop list.
1707e386b672SPaul E. McKenney 	for_each_online_cpu(cpu) {
1708e386b672SPaul E. McKenney 		rcu_read_lock();
170943b75d54SFrederic Weisbecker 		// Note that cpu_curr_snapshot() picks up the target
171043b75d54SFrederic Weisbecker 		// CPU's current task while its runqueue is locked with
171143b75d54SFrederic Weisbecker 		// an smp_mb__after_spinlock().  This ensures that either
171243b75d54SFrederic Weisbecker 		// the grace-period kthread will see that task's read-side
171343b75d54SFrederic Weisbecker 		// critical section or the task will see the updater's pre-GP
171443b75d54SFrederic Weisbecker 		// accesses.  The trailing smp_mb() in cpu_curr_snapshot()
171543b75d54SFrederic Weisbecker 		// does not currently play a role other than simplify
171643b75d54SFrederic Weisbecker 		// that function's ordering semantics.  If these simplified
171743b75d54SFrederic Weisbecker 		// ordering semantics continue to be redundant, that smp_mb()
171843b75d54SFrederic Weisbecker 		// might be removed.
1719e386b672SPaul E. McKenney 		t = cpu_curr_snapshot(cpu);
1720e386b672SPaul E. McKenney 		if (rcu_tasks_trace_pertask_prep(t, true))
1721e386b672SPaul E. McKenney 			trc_add_holdout(t, hop);
1722e386b672SPaul E. McKenney 		rcu_read_unlock();
1723d6ad6063SPaul E. McKenney 		cond_resched_tasks_rcu_qs();
1724e386b672SPaul E. McKenney 	}
1725dc7d54b4SPaul E. McKenney 
1726dc7d54b4SPaul E. McKenney 	// Only after all running tasks have been accounted for is it
1727dc7d54b4SPaul E. McKenney 	// safe to take care of the tasks that have blocked within their
1728dc7d54b4SPaul E. McKenney 	// current RCU tasks trace read-side critical section.
1729dc7d54b4SPaul E. McKenney 	for_each_possible_cpu(cpu) {
1730dc7d54b4SPaul E. McKenney 		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1731dc7d54b4SPaul E. McKenney 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1732dc7d54b4SPaul E. McKenney 		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1733dc7d54b4SPaul E. McKenney 		while (!list_empty(&blkd_tasks)) {
1734dc7d54b4SPaul E. McKenney 			rcu_read_lock();
1735dc7d54b4SPaul E. McKenney 			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1736dc7d54b4SPaul E. McKenney 			list_del_init(&t->trc_blkd_node);
1737dc7d54b4SPaul E. McKenney 			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1738dc7d54b4SPaul E. McKenney 			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1739dc7d54b4SPaul E. McKenney 			rcu_tasks_trace_pertask(t, hop);
1740dc7d54b4SPaul E. McKenney 			rcu_read_unlock();
1741dc7d54b4SPaul E. McKenney 			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1742dc7d54b4SPaul E. McKenney 		}
1743dc7d54b4SPaul E. McKenney 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1744d6ad6063SPaul E. McKenney 		cond_resched_tasks_rcu_qs();
1745dc7d54b4SPaul E. McKenney 	}
174656096ecdSPaul E. McKenney 
174756096ecdSPaul E. McKenney 	// Re-enable CPU hotplug now that the holdout list is populated.
174856096ecdSPaul E. McKenney 	cpus_read_unlock();
17491fa98e2eSPaul E. McKenney }
17501fa98e2eSPaul E. McKenney 
17519796e1aeSPaul E. McKenney /*
1752955a0192SPaul E. McKenney  * Do intermediate processing between task and holdout scans.
17539796e1aeSPaul E. McKenney  */
17549796e1aeSPaul E. McKenney static void rcu_tasks_trace_postscan(struct list_head *hop)
1755d5f177d3SPaul E. McKenney {
1756d5f177d3SPaul E. McKenney 	// Wait for late-stage exiting tasks to finish exiting.
1757d5f177d3SPaul E. McKenney 	// These might have passed the call to exit_tasks_rcu_finish().
1758e6c86c51SPaul E. McKenney 
1759e6c86c51SPaul E. McKenney 	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1760d5f177d3SPaul E. McKenney 	synchronize_rcu();
17613847b645SPaul E. McKenney 	// Any tasks that exit after this point will set
17623847b645SPaul E. McKenney 	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1763d5f177d3SPaul E. McKenney }
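/*
 * Editor's note -- illustrative sketch, not part of this file: the
 * synchronize_rcu() above is what allows rcu_trace_implies_rcu_gp() to
 * return true, so a callback queued via call_rcu_tasks_trace() can skip
 * chaining a second call_rcu() when its memory must also wait out a
 * vanilla RCU grace period.  The example_obj type and example_* helpers
 * are hypothetical, and <linux/slab.h> is assumed for kfree().
 */
struct example_obj {
	struct rcu_head rh;
	/* Payload read under both RCU Tasks Trace and vanilla RCU. */
};

static void example_obj_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_obj, rh));
}

static void example_obj_tasks_trace_cb(struct rcu_head *rhp)
{
	if (rcu_trace_implies_rcu_gp())
		example_obj_free_cb(rhp);		/* Trace GP implied a vanilla GP. */
	else
		call_rcu(rhp, example_obj_free_cb);	/* Otherwise chain a vanilla GP. */
}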
1764d5f177d3SPaul E. McKenney 
176565b629e7SNeeraj Upadhyay /* Communicate task state back to the RCU tasks trace stall warning request. */
176665b629e7SNeeraj Upadhyay struct trc_stall_chk_rdr {
176765b629e7SNeeraj Upadhyay 	int nesting;
176865b629e7SNeeraj Upadhyay 	int ipi_to_cpu;
176965b629e7SNeeraj Upadhyay 	u8 needqs;
177065b629e7SNeeraj Upadhyay };
177165b629e7SNeeraj Upadhyay 
177265b629e7SNeeraj Upadhyay static int trc_check_slow_task(struct task_struct *t, void *arg)
177365b629e7SNeeraj Upadhyay {
177465b629e7SNeeraj Upadhyay 	struct trc_stall_chk_rdr *trc_rdrp = arg;
177565b629e7SNeeraj Upadhyay 
1776f90f19daSPaul E. McKenney 	if (task_curr(t) && cpu_online(task_cpu(t)))
177765b629e7SNeeraj Upadhyay 		return false; // It is running, so decline to inspect it.
177865b629e7SNeeraj Upadhyay 	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
177965b629e7SNeeraj Upadhyay 	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
17803847b645SPaul E. McKenney 	trc_rdrp->needqs = rcu_ld_need_qs(t);
178165b629e7SNeeraj Upadhyay 	return true;
178265b629e7SNeeraj Upadhyay }
178365b629e7SNeeraj Upadhyay 
17844593e772SPaul E. McKenney /* Show the state of a task stalling the current RCU tasks trace GP. */
17854593e772SPaul E. McKenney static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
17864593e772SPaul E. McKenney {
17874593e772SPaul E. McKenney 	int cpu;
178865b629e7SNeeraj Upadhyay 	struct trc_stall_chk_rdr trc_rdr;
178965b629e7SNeeraj Upadhyay 	bool is_idle_tsk = is_idle_task(t);
17904593e772SPaul E. McKenney 
17914593e772SPaul E. McKenney 	if (*firstreport) {
17924593e772SPaul E. McKenney 		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
17934593e772SPaul E. McKenney 		*firstreport = false;
17944593e772SPaul E. McKenney 	}
17954593e772SPaul E. McKenney 	cpu = task_cpu(t);
179665b629e7SNeeraj Upadhyay 	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
17979f3eb5fbSPaul E. McKenney 		pr_alert("P%d: %c%c\n",
179865b629e7SNeeraj Upadhyay 			 t->pid,
17999f3eb5fbSPaul E. McKenney 			 ".I"[t->trc_ipi_to_cpu >= 0],
180065b629e7SNeeraj Upadhyay 			 ".i"[is_idle_tsk]);
180165b629e7SNeeraj Upadhyay 	else
1802387c0ad7SPaul E. McKenney 		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
18034593e772SPaul E. McKenney 			 t->pid,
180465b629e7SNeeraj Upadhyay 			 ".I"[trc_rdr.ipi_to_cpu >= 0],
180565b629e7SNeeraj Upadhyay 			 ".i"[is_idle_tsk],
1806d39ec8f3SNeeraj Upadhyay 			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1807387c0ad7SPaul E. McKenney 			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
180865b629e7SNeeraj Upadhyay 			 trc_rdr.nesting,
1809be15a164SPaul E. McKenney 			 " !CN"[trc_rdr.needqs & 0x3],
1810be15a164SPaul E. McKenney 			 " ?"[trc_rdr.needqs > 0x3],
1811c8c03ad9SPaul E. McKenney 			 cpu, cpu_online(cpu) ? "" : "(offline)");
18124593e772SPaul E. McKenney 	sched_show_task(t);
18134593e772SPaul E. McKenney }
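/*
 * Editor's note: the longer pr_alert() above decodes as
 * "P<pid>: <I><i><N><B> nesting: <n><q><?> cpu: <cpu>[(offline)]", where
 * 'I' means an IPI is outstanding to the task, 'i' means it is an idle
 * task, 'N' means its CPU is nohz_full, 'B' means the reader blocked
 * within its current critical section, <n> is ->trc_reader_nesting,
 * <q> is one of ' ', '!', 'C', or 'N' encoding the two low-order
 * need_qs bits (nominally TRC_NEED_QS and TRC_NEED_QS_CHECKED), and
 * '?' flags unexpected higher-order need_qs bits.  A '.' in any flag
 * position means that condition does not hold.  The shorter form is
 * printed when the task is currently running and thus not inspected.
 */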
18144593e772SPaul E. McKenney 
18154593e772SPaul E. McKenney /* List stalled IPIs for RCU tasks trace. */
18164593e772SPaul E. McKenney static void show_stalled_ipi_trace(void)
18174593e772SPaul E. McKenney {
18184593e772SPaul E. McKenney 	int cpu;
18194593e772SPaul E. McKenney 
18204593e772SPaul E. McKenney 	for_each_possible_cpu(cpu)
18214593e772SPaul E. McKenney 		if (per_cpu(trc_ipi_to_cpu, cpu))
18224593e772SPaul E. McKenney 			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
18234593e772SPaul E. McKenney }
18244593e772SPaul E. McKenney 
1825d5f177d3SPaul E. McKenney /* Do one scan of the holdout list. */
1826d5f177d3SPaul E. McKenney static void check_all_holdout_tasks_trace(struct list_head *hop,
18274593e772SPaul E. McKenney 					  bool needreport, bool *firstreport)
1828d5f177d3SPaul E. McKenney {
1829d5f177d3SPaul E. McKenney 	struct task_struct *g, *t;
1830d5f177d3SPaul E. McKenney 
1831eea3423bSPaul E. McKenney 	// Disable CPU hotplug across the holdout list scan for IPIs.
183281b4a7bcSPaul E. McKenney 	cpus_read_lock();
183381b4a7bcSPaul E. McKenney 
1834d5f177d3SPaul E. McKenney 	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1835d5f177d3SPaul E. McKenney 		// If safe and needed, try to check the current task.
1836d5f177d3SPaul E. McKenney 		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
18373847b645SPaul E. McKenney 		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1838d5f177d3SPaul E. McKenney 			trc_wait_for_one_reader(t, hop);
1839d5f177d3SPaul E. McKenney 
1840d5f177d3SPaul E. McKenney 		// If check succeeded, remove this task from the list.
1841f5dbc594SPaul E. McKenney 		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
18423847b645SPaul E. McKenney 		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1843d5f177d3SPaul E. McKenney 			trc_del_holdout(t);
18444593e772SPaul E. McKenney 		else if (needreport)
18454593e772SPaul E. McKenney 			show_stalled_task_trace(t, firstreport);
1846d6ad6063SPaul E. McKenney 		cond_resched_tasks_rcu_qs();
18474593e772SPaul E. McKenney 	}
184881b4a7bcSPaul E. McKenney 
184981b4a7bcSPaul E. McKenney 	// Re-enable CPU hotplug now that the holdout list scan has completed.
185081b4a7bcSPaul E. McKenney 	cpus_read_unlock();
185181b4a7bcSPaul E. McKenney 
18524593e772SPaul E. McKenney 	if (needreport) {
185389401176SNeeraj Upadhyay 		if (*firstreport)
18544593e772SPaul E. McKenney 			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
18554593e772SPaul E. McKenney 		show_stalled_ipi_trace();
1856d5f177d3SPaul E. McKenney 	}
1857d5f177d3SPaul E. McKenney }
1858d5f177d3SPaul E. McKenney 
1859cbe0d8d9SPaul E. McKenney static void rcu_tasks_trace_empty_fn(void *unused)
1860cbe0d8d9SPaul E. McKenney {
1861cbe0d8d9SPaul E. McKenney }
1862cbe0d8d9SPaul E. McKenney 
1863d5f177d3SPaul E. McKenney /* Wait for grace period to complete and provide ordering. */
1864af051ca4SPaul E. McKenney static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1865d5f177d3SPaul E. McKenney {
1866cbe0d8d9SPaul E. McKenney 	int cpu;
18674593e772SPaul E. McKenney 
1868cbe0d8d9SPaul E. McKenney 	// Wait for any lingering IPI handlers to complete.  Note that
1869cbe0d8d9SPaul E. McKenney 	// if a CPU has gone offline or transitioned to userspace in the
1870cbe0d8d9SPaul E. McKenney 	// meantime, all IPI handlers should have been drained beforehand.
1871cbe0d8d9SPaul E. McKenney 	// Yes, this assumes that CPUs process IPIs in order.  If that ever
1872cbe0d8d9SPaul E. McKenney 	// changes, there will need to be a recheck and/or timed wait.
1873cbe0d8d9SPaul E. McKenney 	for_each_online_cpu(cpu)
1874f5dbc594SPaul E. McKenney 		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1875cbe0d8d9SPaul E. McKenney 			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1876cbe0d8d9SPaul E. McKenney 
1877d5f177d3SPaul E. McKenney 	smp_mb(); // Caller's code must be ordered after wakeup.
187843766c3eSPaul E. McKenney 		  // Pairs with pretty much every ordering primitive.
1879d5f177d3SPaul E. McKenney }
1880d5f177d3SPaul E. McKenney 
1881d5f177d3SPaul E. McKenney /* Report any needed quiescent state for this exiting task. */
188225246fc8SPaul E. McKenney static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1883d5f177d3SPaul E. McKenney {
18840356d4e6SPaul E. McKenney 	union rcu_special trs = READ_ONCE(t->trc_reader_special);
18850356d4e6SPaul E. McKenney 
18863847b645SPaul E. McKenney 	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1887bdb0cca0SPaul E. McKenney 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
18880bcb3868SPaul E. McKenney 	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1889a5c071ccSPaul E. McKenney 		rcu_read_unlock_trace_special(t);
18903847b645SPaul E. McKenney 	else
18913847b645SPaul E. McKenney 		WRITE_ONCE(t->trc_reader_nesting, 0);
1892d5f177d3SPaul E. McKenney }
1893d5f177d3SPaul E. McKenney 
1894d5f177d3SPaul E. McKenney /**
1895d5f177d3SPaul E. McKenney  * call_rcu_tasks_trace() - Queue a callback for invocation after a trace rcu-tasks grace period
1896d5f177d3SPaul E. McKenney  * @rhp: structure to be used for queueing the RCU updates.
1897d5f177d3SPaul E. McKenney  * @func: actual callback function to be invoked after the grace period
1898d5f177d3SPaul E. McKenney  *
1899ed42c380SNeeraj Upadhyay  * The callback function will be invoked some time after a trace rcu-tasks
1900ed42c380SNeeraj Upadhyay  * grace period elapses, in other words after all currently executing
1901ed42c380SNeeraj Upadhyay  * trace rcu-tasks read-side critical sections have completed. These
1902ed42c380SNeeraj Upadhyay  * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1903ed42c380SNeeraj Upadhyay  * and rcu_read_unlock_trace().
1904d5f177d3SPaul E. McKenney  *
1905d5f177d3SPaul E. McKenney  * See the description of call_rcu() for more detailed information on
1906d5f177d3SPaul E. McKenney  * memory ordering guarantees.
1907d5f177d3SPaul E. McKenney  */
1908d5f177d3SPaul E. McKenney void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1909d5f177d3SPaul E. McKenney {
1910d5f177d3SPaul E. McKenney 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1911d5f177d3SPaul E. McKenney }
1912d5f177d3SPaul E. McKenney EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
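/*
 * Editor's note -- illustrative usage sketch, not part of this file:
 * a typical caller embeds an rcu_head in the object being retired and
 * frees it from the callback.  The example_node type and example_*
 * helpers are hypothetical, and <linux/slab.h> is assumed for kfree().
 */
struct example_node {
	struct rcu_head rh;
	int payload;
};

static void example_node_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_node, rh));
}

static void example_node_retire(struct example_node *np)
{
	/* Pre-existing rcu_read_lock_trace() readers may still hold references. */
	call_rcu_tasks_trace(&np->rh, example_node_reclaim);
}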
1913d5f177d3SPaul E. McKenney 
1914d5f177d3SPaul E. McKenney /**
1915d5f177d3SPaul E. McKenney  * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1916d5f177d3SPaul E. McKenney  *
1917d5f177d3SPaul E. McKenney  * Control will return to the caller some time after a trace rcu-tasks
1918c7dcf810SPaul E. McKenney  * grace period has elapsed, in other words after all currently executing
1919ed42c380SNeeraj Upadhyay  * trace rcu-tasks read-side critical sections have completed. These read-side
1920c7dcf810SPaul E. McKenney  * critical sections are delimited by calls to rcu_read_lock_trace()
1921c7dcf810SPaul E. McKenney  * and rcu_read_unlock_trace().
1922d5f177d3SPaul E. McKenney  *
1923d5f177d3SPaul E. McKenney  * This is a very specialized primitive, intended only for a few uses in
1924d5f177d3SPaul E. McKenney  * tracing and other situations requiring manipulation of function preambles
1925d5f177d3SPaul E. McKenney  * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1926d5f177d3SPaul E. McKenney  * (yet) intended for heavy use from multiple CPUs.
1927d5f177d3SPaul E. McKenney  *
1928d5f177d3SPaul E. McKenney  * See the description of synchronize_rcu() for more detailed information
1929d5f177d3SPaul E. McKenney  * on memory ordering guarantees.
1930d5f177d3SPaul E. McKenney  */
1931d5f177d3SPaul E. McKenney void synchronize_rcu_tasks_trace(void)
1932d5f177d3SPaul E. McKenney {
1933d5f177d3SPaul E. McKenney 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1934d5f177d3SPaul E. McKenney 	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1935d5f177d3SPaul E. McKenney }
1936d5f177d3SPaul E. McKenney EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
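/*
 * Editor's note -- illustrative usage sketch, not part of this file:
 * readers use rcu_read_lock_trace()/rcu_read_unlock_trace() (from
 * <linux/rcupdate_trace.h>), while the updater unpublishes the object,
 * waits for pre-existing trace readers, and only then frees it.  The
 * example_cfg type and example_* names are hypothetical.
 */
struct example_cfg {
	int threshold;
};

static struct example_cfg __rcu *example_cfg_ptr;

static int example_cfg_read_threshold(void)
{
	struct example_cfg *cfg;
	int ret;

	rcu_read_lock_trace();
	cfg = rcu_dereference_check(example_cfg_ptr, rcu_read_lock_trace_held());
	ret = cfg ? cfg->threshold : -1;
	rcu_read_unlock_trace();
	return ret;
}

static void example_cfg_replace(struct example_cfg *newcfg)
{
	struct example_cfg *old;

	old = rcu_replace_pointer(example_cfg_ptr, newcfg, true);
	synchronize_rcu_tasks_trace();	/* Wait for all pre-existing trace readers. */
	kfree(old);
}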
1937d5f177d3SPaul E. McKenney 
1938d5f177d3SPaul E. McKenney /**
1939d5f177d3SPaul E. McKenney  * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1940d5f177d3SPaul E. McKenney  *
1941d5f177d3SPaul E. McKenney  * Although the current implementation is guaranteed to wait, it is not
1942d5f177d3SPaul E. McKenney  * obligated to do so, for example, if there are no pending callbacks.
1943d5f177d3SPaul E. McKenney  */
1944d5f177d3SPaul E. McKenney void rcu_barrier_tasks_trace(void)
1945d5f177d3SPaul E. McKenney {
1946ce9b1c66SPaul E. McKenney 	rcu_barrier_tasks_generic(&rcu_tasks_trace);
1947d5f177d3SPaul E. McKenney }
1948d5f177d3SPaul E. McKenney EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
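/*
 * Editor's note -- illustrative usage sketch, not part of this file:
 * a module that queues callbacks via call_rcu_tasks_trace() must wait
 * for them to drain before its callback text can safely go away.  The
 * example_module_exit() name is hypothetical.
 */
static void __exit example_module_exit(void)
{
	/* After ensuring no new callbacks can be queued... */
	rcu_barrier_tasks_trace();	/* ...wait for all in-flight callbacks. */
}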
1949d5f177d3SPaul E. McKenney 
1950450d461aSPaul E. McKenney int rcu_tasks_trace_lazy_ms = -1;
1951450d461aSPaul E. McKenney module_param(rcu_tasks_trace_lazy_ms, int, 0444);
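/*
 * Editor's note: the 0444 permission above makes this knob boot-time only.
 * Because this file is normally built into kernel/rcu/update.c, it is
 * typically set as "rcupdate.rcu_tasks_trace_lazy_ms=<msec>" on the kernel
 * command line; the default of -1 leaves the flavor's built-in laziness
 * setting unchanged (see the "if (rcu_tasks_trace_lazy_ms >= 0)" test below).
 */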
1952450d461aSPaul E. McKenney 
1953d5f177d3SPaul E. McKenney static int __init rcu_spawn_tasks_trace_kthread(void)
1954d5f177d3SPaul E. McKenney {
1955cafafd67SPaul E. McKenney 	cblist_init_generic(&rcu_tasks_trace);
19562393a613SPaul E. McKenney 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
19574fe192dfSPaul E. McKenney 		rcu_tasks_trace.gp_sleep = HZ / 10;
195875dc2da5SPaul E. McKenney 		rcu_tasks_trace.init_fract = HZ / 10;
19592393a613SPaul E. McKenney 	} else {
19604fe192dfSPaul E. McKenney 		rcu_tasks_trace.gp_sleep = HZ / 200;
19614fe192dfSPaul E. McKenney 		if (rcu_tasks_trace.gp_sleep <= 0)
19624fe192dfSPaul E. McKenney 			rcu_tasks_trace.gp_sleep = 1;
196375dc2da5SPaul E. McKenney 		rcu_tasks_trace.init_fract = HZ / 200;
19642393a613SPaul E. McKenney 		if (rcu_tasks_trace.init_fract <= 0)
19652393a613SPaul E. McKenney 			rcu_tasks_trace.init_fract = 1;
19662393a613SPaul E. McKenney 	}
1967450d461aSPaul E. McKenney 	if (rcu_tasks_trace_lazy_ms >= 0)
1968450d461aSPaul E. McKenney 		rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
1969d5f177d3SPaul E. McKenney 	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1970d5f177d3SPaul E. McKenney 	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1971d5f177d3SPaul E. McKenney 	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1972d5f177d3SPaul E. McKenney 	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1973d5f177d3SPaul E. McKenney 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1974d5f177d3SPaul E. McKenney 	return 0;
1975d5f177d3SPaul E. McKenney }
1976d5f177d3SPaul E. McKenney 
197727c0f144SPaul E. McKenney #if !defined(CONFIG_TINY_RCU)
197827c0f144SPaul E. McKenney void show_rcu_tasks_trace_gp_kthread(void)
1979e21408ceSPaul E. McKenney {
198040471509SPaul E. McKenney 	char buf[64];
1981e21408ceSPaul E. McKenney 
198232d988f4SNikita Kiryushin 	snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
1983ffcc21a3SPaul E. McKenney 		data_race(n_trc_holdouts),
1984edf3775fSPaul E. McKenney 		data_race(n_heavy_reader_ofl_updates),
198540471509SPaul E. McKenney 		data_race(n_heavy_reader_updates),
198640471509SPaul E. McKenney 		data_race(n_heavy_reader_attempts));
1987e21408ceSPaul E. McKenney 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1988e21408ceSPaul E. McKenney }
198927c0f144SPaul E. McKenney EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
199027c0f144SPaul E. McKenney #endif // !defined(CONFIG_TINY_RCU)
1991e21408ceSPaul E. McKenney 
19925f8e3202SPaul E. McKenney struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
19935f8e3202SPaul E. McKenney {
19945f8e3202SPaul E. McKenney 	return rcu_tasks_trace.kthread_ptr;
19955f8e3202SPaul E. McKenney }
19965f8e3202SPaul E. McKenney EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
19975f8e3202SPaul E. McKenney 
1998d5f177d3SPaul E. McKenney #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
199925246fc8SPaul E. McKenney static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
2000d5f177d3SPaul E. McKenney #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
20018fd8ca38SPaul E. McKenney 
20028344496eSPaul E. McKenney #ifndef CONFIG_TINY_RCU
2003e21408ceSPaul E. McKenney void show_rcu_tasks_gp_kthreads(void)
2004e21408ceSPaul E. McKenney {
2005e21408ceSPaul E. McKenney 	show_rcu_tasks_classic_gp_kthread();
2006e21408ceSPaul E. McKenney 	show_rcu_tasks_rude_gp_kthread();
2007e21408ceSPaul E. McKenney 	show_rcu_tasks_trace_gp_kthread();
2008e21408ceSPaul E. McKenney }
20098344496eSPaul E. McKenney #endif /* #ifndef CONFIG_TINY_RCU */
2010e21408ceSPaul E. McKenney 
2011bfba7ed0SUladzislau Rezki (Sony) #ifdef CONFIG_PROVE_RCU
2012bfba7ed0SUladzislau Rezki (Sony) struct rcu_tasks_test_desc {
2013bfba7ed0SUladzislau Rezki (Sony) 	struct rcu_head rh;
2014bfba7ed0SUladzislau Rezki (Sony) 	const char *name;
2015bfba7ed0SUladzislau Rezki (Sony) 	bool notrun;
20161cf1144eSPaul E. McKenney 	unsigned long runstart;
2017bfba7ed0SUladzislau Rezki (Sony) };
2018bfba7ed0SUladzislau Rezki (Sony) 
2019bfba7ed0SUladzislau Rezki (Sony) static struct rcu_tasks_test_desc tests[] = {
2020bfba7ed0SUladzislau Rezki (Sony) 	{
2021bfba7ed0SUladzislau Rezki (Sony) 		.name = "call_rcu_tasks()",
2022bfba7ed0SUladzislau Rezki (Sony) 		/* If not defined, the test is skipped. */
20231cf1144eSPaul E. McKenney 		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
2024bfba7ed0SUladzislau Rezki (Sony) 	},
2025bfba7ed0SUladzislau Rezki (Sony) 	{
2026bfba7ed0SUladzislau Rezki (Sony) 		.name = "call_rcu_tasks_rude()",
2027bfba7ed0SUladzislau Rezki (Sony) 		/* If not defined, the test is skipped. */
20281cf1144eSPaul E. McKenney 		.notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
2029bfba7ed0SUladzislau Rezki (Sony) 	},
2030bfba7ed0SUladzislau Rezki (Sony) 	{
2031bfba7ed0SUladzislau Rezki (Sony) 		.name = "call_rcu_tasks_trace()",
2032bfba7ed0SUladzislau Rezki (Sony) 		/* If not defined, the test is skipped. */
20331cf1144eSPaul E. McKenney 		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
2034bfba7ed0SUladzislau Rezki (Sony) 	}
2035bfba7ed0SUladzislau Rezki (Sony) };
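/*
 * Editor's note: each entry's ->notrun starts out true only when the
 * corresponding flavor is configured, is cleared by
 * test_rcu_tasks_callback() once that flavor's callback runs, and is
 * polled by rcu_tasks_verify_self_tests() until it clears or the
 * boot-time limit derived from rcu_task_stall_timeout expires.
 */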
2036bfba7ed0SUladzislau Rezki (Sony) 
2037bfba7ed0SUladzislau Rezki (Sony) static void test_rcu_tasks_callback(struct rcu_head *rhp)
2038bfba7ed0SUladzislau Rezki (Sony) {
2039bfba7ed0SUladzislau Rezki (Sony) 	struct rcu_tasks_test_desc *rttd =
2040bfba7ed0SUladzislau Rezki (Sony) 		container_of(rhp, struct rcu_tasks_test_desc, rh);
2041bfba7ed0SUladzislau Rezki (Sony) 
2042bfba7ed0SUladzislau Rezki (Sony) 	pr_info("Callback from %s invoked.\n", rttd->name);
2043bfba7ed0SUladzislau Rezki (Sony) 
20441cf1144eSPaul E. McKenney 	rttd->notrun = false;
2045bfba7ed0SUladzislau Rezki (Sony) }
2046bfba7ed0SUladzislau Rezki (Sony) 
2047bfba7ed0SUladzislau Rezki (Sony) static void rcu_tasks_initiate_self_tests(void)
2048bfba7ed0SUladzislau Rezki (Sony) {
2049bfba7ed0SUladzislau Rezki (Sony) 	pr_info("Running RCU-tasks wait API self tests\n");
2050bfba7ed0SUladzislau Rezki (Sony) #ifdef CONFIG_TASKS_RCU
20519420fb93SZqiang 	tests[0].runstart = jiffies;
2052bfba7ed0SUladzislau Rezki (Sony) 	synchronize_rcu_tasks();
2053bfba7ed0SUladzislau Rezki (Sony) 	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2054bfba7ed0SUladzislau Rezki (Sony) #endif
2055bfba7ed0SUladzislau Rezki (Sony) 
2056bfba7ed0SUladzislau Rezki (Sony) #ifdef CONFIG_TASKS_RUDE_RCU
20579420fb93SZqiang 	tests[1].runstart = jiffies;
2058bfba7ed0SUladzislau Rezki (Sony) 	synchronize_rcu_tasks_rude();
2059bfba7ed0SUladzislau Rezki (Sony) 	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
2060bfba7ed0SUladzislau Rezki (Sony) #endif
2061bfba7ed0SUladzislau Rezki (Sony) 
2062bfba7ed0SUladzislau Rezki (Sony) #ifdef CONFIG_TASKS_TRACE_RCU
20639420fb93SZqiang 	tests[2].runstart = jiffies;
2064bfba7ed0SUladzislau Rezki (Sony) 	synchronize_rcu_tasks_trace();
2065bfba7ed0SUladzislau Rezki (Sony) 	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
2066bfba7ed0SUladzislau Rezki (Sony) #endif
2067bfba7ed0SUladzislau Rezki (Sony) }
2068bfba7ed0SUladzislau Rezki (Sony) 
2069e72ee5e1SWaiman Long /*
2070e72ee5e1SWaiman Long  * Return:  0 - test passed
2071e72ee5e1SWaiman Long  *	    1 - test failed, but have not timed out yet
2072e72ee5e1SWaiman Long  *	   -1 - test failed and timed out
2073e72ee5e1SWaiman Long  */
2074bfba7ed0SUladzislau Rezki (Sony) static int rcu_tasks_verify_self_tests(void)
2075bfba7ed0SUladzislau Rezki (Sony) {
2076bfba7ed0SUladzislau Rezki (Sony) 	int ret = 0;
2077bfba7ed0SUladzislau Rezki (Sony) 	int i;
20781cf1144eSPaul E. McKenney 	unsigned long bst = rcu_task_stall_timeout;
2079bfba7ed0SUladzislau Rezki (Sony) 
20801cf1144eSPaul E. McKenney 	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
20811cf1144eSPaul E. McKenney 		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
2082bfba7ed0SUladzislau Rezki (Sony) 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
20831cf1144eSPaul E. McKenney 		while (tests[i].notrun) {		// still hanging.
20841cf1144eSPaul E. McKenney 			if (time_after(jiffies, tests[i].runstart + bst)) {
20851cf1144eSPaul E. McKenney 				pr_err("%s has failed boot-time tests.\n", tests[i].name);
2086bfba7ed0SUladzislau Rezki (Sony) 				ret = -1;
20871cf1144eSPaul E. McKenney 				break;
20881cf1144eSPaul E. McKenney 			}
2089e72ee5e1SWaiman Long 			ret = 1;
2090e72ee5e1SWaiman Long 			break;
2091bfba7ed0SUladzislau Rezki (Sony) 		}
2092bfba7ed0SUladzislau Rezki (Sony) 	}
2093e72ee5e1SWaiman Long 	WARN_ON(ret < 0);
2094bfba7ed0SUladzislau Rezki (Sony) 
2095bfba7ed0SUladzislau Rezki (Sony) 	return ret;
2096bfba7ed0SUladzislau Rezki (Sony) }
2097e72ee5e1SWaiman Long 
2098e72ee5e1SWaiman Long /*
2099e72ee5e1SWaiman Long  * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2100e72ee5e1SWaiman Long  * test passes or has timed out.
2101e72ee5e1SWaiman Long  */
2102e72ee5e1SWaiman Long static struct delayed_work rcu_tasks_verify_work;
2103e72ee5e1SWaiman Long static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2104e72ee5e1SWaiman Long {
2105e72ee5e1SWaiman Long 	int ret = rcu_tasks_verify_self_tests();
2106e72ee5e1SWaiman Long 
2107e72ee5e1SWaiman Long 	if (ret <= 0)
2108e72ee5e1SWaiman Long 		return;
2109e72ee5e1SWaiman Long 
2111e72ee5e1SWaiman Long 	/* Test has failed but not yet timed out, so reschedule another check. */
2111e72ee5e1SWaiman Long 	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2112e72ee5e1SWaiman Long }
2113e72ee5e1SWaiman Long 
2114e72ee5e1SWaiman Long static int rcu_tasks_verify_schedule_work(void)
2115e72ee5e1SWaiman Long {
2116e72ee5e1SWaiman Long 	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2117e72ee5e1SWaiman Long 	rcu_tasks_verify_work_fn(NULL);
2118e72ee5e1SWaiman Long 	return 0;
2119e72ee5e1SWaiman Long }
2120e72ee5e1SWaiman Long late_initcall(rcu_tasks_verify_schedule_work);
2121bfba7ed0SUladzislau Rezki (Sony) #else /* #ifdef CONFIG_PROVE_RCU */
2122bfba7ed0SUladzislau Rezki (Sony) static void rcu_tasks_initiate_self_tests(void) { }
2123bfba7ed0SUladzislau Rezki (Sony) #endif /* #else #ifdef CONFIG_PROVE_RCU */
2124bfba7ed0SUladzislau Rezki (Sony) 
21251b04fa99SUladzislau Rezki (Sony) void __init rcu_init_tasks_generic(void)
21261b04fa99SUladzislau Rezki (Sony) {
21271b04fa99SUladzislau Rezki (Sony) #ifdef CONFIG_TASKS_RCU
21281b04fa99SUladzislau Rezki (Sony) 	rcu_spawn_tasks_kthread();
21291b04fa99SUladzislau Rezki (Sony) #endif
21301b04fa99SUladzislau Rezki (Sony) 
21311b04fa99SUladzislau Rezki (Sony) #ifdef CONFIG_TASKS_RUDE_RCU
21321b04fa99SUladzislau Rezki (Sony) 	rcu_spawn_tasks_rude_kthread();
21331b04fa99SUladzislau Rezki (Sony) #endif
21341b04fa99SUladzislau Rezki (Sony) 
21351b04fa99SUladzislau Rezki (Sony) #ifdef CONFIG_TASKS_TRACE_RCU
21361b04fa99SUladzislau Rezki (Sony) 	rcu_spawn_tasks_trace_kthread();
21371b04fa99SUladzislau Rezki (Sony) #endif
2138bfba7ed0SUladzislau Rezki (Sony) 
2139bfba7ed0SUladzislau Rezki (Sony) 	// Run the self-tests.
2140bfba7ed0SUladzislau Rezki (Sony) 	rcu_tasks_initiate_self_tests();
21411b04fa99SUladzislau Rezki (Sony) }
21421b04fa99SUladzislau Rezki (Sony) 
21438fd8ca38SPaul E. McKenney #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
21448fd8ca38SPaul E. McKenney static inline void rcu_tasks_bootup_oddness(void) {}
21458fd8ca38SPaul E. McKenney #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
2146