/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Process-level increment to ->dynticks_nesting field.  This allows for
 * architectures that use half-interrupts and half-exceptions from
 * process context.
 *
 * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
 * that counts the number of process-based reasons why RCU cannot
 * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
 * is the value used to increment or decrement this field.
 *
 * The rest of the bits could in principle be used to count interrupts,
 * but this would mean that a negative-one value in the interrupt
 * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
 * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
 * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
 * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
 * initial exit from idle.
 */
#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
#define DYNTICK_TASK_FLAG	   ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
#define DYNTICK_TASK_MASK	   ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
#define DYNTICK_TASK_EXIT_IDLE	   (DYNTICK_TASK_NEST_VALUE + \
				    DYNTICK_TASK_FLAG)

#define DYNTICK_IRQ_NONIDLE	((INT_MAX / 2) + 1)
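
/*
 * For illustration only: assuming the usual 64-bit long long, the
 * definitions above work out to the following bit patterns:
 *
 *	DYNTICK_TASK_NEST_VALUE	0x0100000000000000  (increment, bit 56)
 *	DYNTICK_TASK_NEST_MASK	0x7f00000000000000  (7-bit nesting count)
 *	DYNTICK_TASK_FLAG	0x0040000000000000  (guard flag, bit 54)
 *	DYNTICK_TASK_MASK	0x0060000000000000  (2-bit guard field)
 *	DYNTICK_TASK_EXIT_IDLE	0x0140000000000000  (NEST_VALUE + FLAG)
 *	DYNTICK_IRQ_NONIDLE	0x40000000          ((INT_MAX / 2) + 1)
 */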


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
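
/*
 * Illustrative example, not part of the API: with RCU_SEQ_CTR_SHIFT == 2,
 * a sequence value of 0x1d decomposes as rcu_seq_ctr(0x1d) == 0x7 and
 * rcu_seq_state(0x1d) == 0x1, that is, the counter portion is 7 and the
 * nonzero low-order state bits indicate an update-side operation in
 * progress.
 */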

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
}

/* Take a snapshot of the update side's sequence number. */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
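
/*
 * Minimal usage sketch of the rcu_seq_*() helpers, for illustration only
 * (the "gp_seq" variable is hypothetical):
 *
 *	s = rcu_seq_snap(&gp_seq);  (counter value at which a full
 *	                             update-side operation must have
 *	                             completed since this point)
 *	... some other context runs rcu_seq_start(&gp_seq), does the
 *	    grace-period work, then runs rcu_seq_end(&gp_seq) ...
 *	if (rcu_seq_done(&gp_seq, s))
 *		(a full update-side operation has elapsed since the snapshot)
 */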

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of the
 * RCU API.  They are kept in rcu.h because they are used by all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
		head->func(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
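
/*
 * Illustrative sketch of the offset encoding that __rcu_reclaim() relies
 * on; the struct and names below are hypothetical, not a definitive
 * statement of the kfree_rcu() implementation.  kfree_rcu(p, rh) posts a
 * callback whose "function pointer" is really the offset of the rcu_head
 * within the enclosing structure, so for
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 * the callback "address" is the small value offsetof(struct foo, rh),
 * which __is_kfree_rcu_offset() recognizes, allowing the lazy path above
 * to simply do kfree((void *)head - offset).
 */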

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)
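
/*
 * Example use, for illustration only: wrapping a literal in TPS() exports
 * it so that perf and trace-cmd can resolve the pointer back to text:
 *
 *	trace_rcu_utilization(TPS("Start context switch"));
 */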

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
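
/*
 * Example use, for illustration only: dump all CPUs' trace buffers the
 * first time this callsite fires, and do nothing on later invocations
 * from the same callsite during this boot:
 *
 *	rcu_ftrace_dump(DUMP_ALL);
 */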

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
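
/*
 * Worked example, for illustration only: with nr_cpu_ids == 64, a leaf
 * fanout of 16, and thus rcu_num_lvls == 2 with levelcnt[] == { 1, 4 },
 * the balanced (!rcu_fanout_exact) path above computes
 * levelspread[] == { 4, 16 }: each leaf covers (64 + 4 - 1) / 4 == 16
 * CPUs and the root covers (4 + 1 - 1) / 1 == 4 leaves.
 */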

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
	     cpu <= rnp->grphi; \
	     cpu = cpumask_next((cpu), cpu_possible_mask))
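
/*
 * Typical combined use of the two iterators above, for illustration only
 * (do_something_with() is a hypothetical helper):
 *
 *	rcu_for_each_leaf_node(rsp, rnp) {
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			do_something_with(cpu);
 *	}
 */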

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p)					\
do {									\
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)					\
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p)					\
({									\
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})
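
/*
 * Usage sketch, for illustration only:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	... update rnp fields; the acquisition above includes
 *	    smp_mb__after_unlock_lock(), so it is fully ordered against
 *	    the previous holder's critical section ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */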

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

#ifdef CONFIG_TINY_RCU
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void) { }
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gpnum,
					   unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = sp->srcu_idx;
	*gpnum = *completed;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_batches_started(void) { return 0; }
static inline unsigned long rcu_batches_started_bh(void) { return 0; }
static inline unsigned long rcu_batches_started_sched(void) { return 0; }
static inline unsigned long rcu_batches_completed(void) { return 0; }
static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
#else /* #ifdef CONFIG_TINY_RCU */
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

#endif /* __LINUX_RCU_H */