/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
	unsigned long rew_s;
#ifdef CONFIG_RCU_EXP_KTHREAD
	struct kthread_work rew_work;
#else
	struct work_struct rew_work;
#endif /* CONFIG_RCU_EXP_KTHREAD */
};
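
/*
 * Illustrative sketch (not part of the build): a handler for either
 * flavor of ->rew_work can recover the enclosing rcu_exp_work with
 * container_of() and then use the ->rew_s sequence number.  The
 * handler and callee names here are hypothetical.
 *
 *	static void example_exp_handler(struct work_struct *wp)
 *	{
 *		struct rcu_exp_work *rewp =
 *			container_of(wp, struct rcu_exp_work, rew_work);
 *
 *		process_expedited_seq(rewp->rew_s);
 *	}
 */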

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gp_seq;	/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed; /* Track furthest future GP request. */
	unsigned long completedqs; /* All QSes done for this node. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long cbovldmask;
				/* CPUs experiencing callback overload. */
	unsigned long ffmask;	/* Fully functional CPUs. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU here. */
	int	grphi;		/* highest-numbered CPU here. */
	u8	grpnum;		/* group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct mutex boost_kthread_mutex;
				/* Exclusion for thread spawning and affinity */
				/*  manipulation. */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;	/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;	/* Need to flush workitem? */
	raw_spinlock_t exp_poll_lock;
				/* Lock and data for polled expedited grace periods. */
	unsigned long exp_seq_poll_rq;
	struct work_struct exp_poll_wq;
} ____cacheline_internodealigned_in_smp;
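
/*
 * Illustrative sketch of quiescent-state propagation (not part of the
 * build, locking elided): clearing the last ->qsmask bit in a leaf
 * clears that leaf's ->grpmask bit in its parent, and so on up to the
 * root, at which point the grace period can end.
 *
 *	for (; rnp != NULL; rnp = rnp->parent) {
 *		rnp->qsmask &= ~mask;	// Under rnp->lock.
 *		if (rnp->qsmask != 0)
 *			break;		// Other CPUs/groups still needed.
 *		mask = rnp->grpmask;	// This node's bit one level up.
 *	}
 */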

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
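
/*
 * For example (illustrative only): for a leaf covering CPUs 16-31,
 * leaf_node_cpu_bit(rnp, 19) is BIT(19 - 16) == BIT(3), the bit for
 * CPU 19 in that leaf's ->qsmask and related masks.
 */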

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
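
/*
 * Illustrative use (not part of the build): a single load of ->s
 * tests both needs at once, while each flavor remains individually
 * addressable:
 *
 *	if (!rdp->cpu_no_qs.s)
 *		return;			// No quiescent state needed.
 *	if (rdp->cpu_no_qs.b.norm)
 *		...			// Normal GP still needs one.
 */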

/*
 * Record the snapshot of the core stats at half of the first RCU stall timeout.
 * The member gp_seq is used to ensure that all members are updated only once
 * during the sampling period. The snapshot is taken only if this gp_seq is not
 * equal to rdp->gp_seq.
 */
struct rcu_snap_record {
	unsigned long	gp_seq;		/* Track rdp->gp_seq counter */
	u64		cputime_irq;	/* Accumulated cputime of hard irqs */
	u64		cputime_softirq;/* Accumulated cputime of soft irqs */
	u64		cputime_system; /* Accumulated cputime of kernel tasks */
	unsigned long	nr_hardirqs;	/* Accumulated number of hard irqs */
	unsigned int	nr_softirqs;	/* Accumulated number of soft irqs */
	unsigned long long nr_csw;	/* Accumulated number of task switches */
	unsigned long   jiffies;	/* Track jiffies value */
};
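
/*
 * Illustrative sketch of the once-per-GP gating described above
 * (not part of the build; helper names are assumptions):
 *
 *	struct rcu_snap_record *rsrp = &rdp->snap_record;
 *
 *	if (rsrp->gp_seq == rdp->gp_seq)
 *		return;				// Already sampled this GP.
 *	rsrp->nr_csw = get_nr_csw(rdp->cpu);	// Hypothetical helper.
 *	...					// Fill remaining members.
 *	rsrp->gp_seq = rdp->gp_seq;		// Mark snapshot as taken.
 */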

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/* different callbacks waiting for */
					/* different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
					    /* spawning */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
	struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;
	unsigned long rcuc_activity;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
	unsigned long last_sched_clock;	/* Jiffies of last rcu_sched_clock_irq(). */
	struct rcu_snap_record snap_record; /* Snapshot of core stats at half of */
					    /* the first RCU stall timeout */

	long lazy_len;			/* Length of buffered lazy callbacks. */
	int cpu;
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE_LAZY	2
#define RCU_NOCB_WAKE		3
#define RCU_NOCB_WAKE_FORCE	4
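
/*
 * These values are assumed to be ordered by increasing urgency, so a
 * numeric comparison against ->nocb_defer_wakeup is meaningful, for
 * example (illustrative only):
 *
 *	if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
 *		...			// At least a non-lazy wake pending.
 */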

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
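
/*
 * Worked example: RCU_JIFFIES_TILL_FORCE_QS is 1 jiffy for HZ=100,
 * 2 for HZ=300, and 3 for HZ=1000, keeping the delay before the first
 * forced-quiescent-state scan at roughly 3-10ms across common HZ values.
 */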

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
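
/*
 * Illustrative use (not from this file): sleep until a made-up flag
 * becomes true, re-evaluating the condition after every wakeup so
 * that spurious wakeups are harmless:
 *
 *	rcu_wait(READ_ONCE(my_kthread_should_run));
 */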

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
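/*
 * For example (illustrative): with CONFIG_RCU_FANOUT=16,
 * CONFIG_RCU_FANOUT_LEAF=16, and 64 possible CPUs, there are two
 * levels: ->node[0] is the root and ->node[1] through ->node[4] are
 * leaves, each covering 16 CPUs.
 */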
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
						/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
	unsigned long gp_seq_polled;		/* GP seq for polled API. */
	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
						 * which read jiffies and set
						 * jiffies_stall. Stall
						 * warnings disabled if !0. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
	int nocb_is_setup;			/* nocb is setup from boot */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF     3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT      4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added to the __tracepoint_string section.
 * This requires defining a separate variable (tp_rcu_varname below)
 * that points to the string being used, which allows the userspace
 * tracing tools to map the string address back to the matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags,
				bool lazy);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)			\
do {								\
	local_irq_save(flags);					\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		raw_spin_lock(&(rdp)->nocb_lock);		\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
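
/*
 * Illustrative pairing (not from this file): the matching release is
 * rcu_nocb_unlock_irqrestore(), declared above, which drops
 * ->nocb_lock only if it was actually acquired:
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	...				// ->cblist safely accessed here.
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 */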

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);

/* Forward declarations for tree_exp.h. */
static void sync_rcu_do_polled_gp(struct work_struct *wp);
513