xref: /openbmc/linux/kernel/rcu/rcu.h (revision 83d40bd3bc3ab3d6b5a4a331f7667d627948a099)
14102adabSPaul E. McKenney /*
24102adabSPaul E. McKenney  * Read-Copy Update definitions shared among RCU implementations.
34102adabSPaul E. McKenney  *
44102adabSPaul E. McKenney  * This program is free software; you can redistribute it and/or modify
54102adabSPaul E. McKenney  * it under the terms of the GNU General Public License as published by
64102adabSPaul E. McKenney  * the Free Software Foundation; either version 2 of the License, or
74102adabSPaul E. McKenney  * (at your option) any later version.
84102adabSPaul E. McKenney  *
94102adabSPaul E. McKenney  * This program is distributed in the hope that it will be useful,
104102adabSPaul E. McKenney  * but WITHOUT ANY WARRANTY; without even the implied warranty of
114102adabSPaul E. McKenney  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
124102adabSPaul E. McKenney  * GNU General Public License for more details.
134102adabSPaul E. McKenney  *
144102adabSPaul E. McKenney  * You should have received a copy of the GNU General Public License
1587de1cfdSPaul E. McKenney  * along with this program; if not, you can access it online at
1687de1cfdSPaul E. McKenney  * http://www.gnu.org/licenses/gpl-2.0.html.
174102adabSPaul E. McKenney  *
184102adabSPaul E. McKenney  * Copyright IBM Corporation, 2011
194102adabSPaul E. McKenney  *
204102adabSPaul E. McKenney  * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
214102adabSPaul E. McKenney  */
224102adabSPaul E. McKenney 
234102adabSPaul E. McKenney #ifndef __LINUX_RCU_H
244102adabSPaul E. McKenney #define __LINUX_RCU_H
254102adabSPaul E. McKenney 
265cb5c6e1SPaul Gortmaker #include <trace/events/rcu.h>
274102adabSPaul E. McKenney #ifdef CONFIG_RCU_TRACE
284102adabSPaul E. McKenney #define RCU_TRACE(stmt) stmt
294102adabSPaul E. McKenney #else /* #ifdef CONFIG_RCU_TRACE */
304102adabSPaul E. McKenney #define RCU_TRACE(stmt)
314102adabSPaul E. McKenney #endif /* #else #ifdef CONFIG_RCU_TRACE */
324102adabSPaul E. McKenney 
334102adabSPaul E. McKenney /*
344102adabSPaul E. McKenney  * Process-level increment to ->dynticks_nesting field.  This allows for
354102adabSPaul E. McKenney  * architectures that use half-interrupts and half-exceptions from
364102adabSPaul E. McKenney  * process context.
374102adabSPaul E. McKenney  *
384102adabSPaul E. McKenney  * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
394102adabSPaul E. McKenney  * that counts the number of process-based reasons why RCU cannot
404102adabSPaul E. McKenney  * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
414102adabSPaul E. McKenney  * is the value used to increment or decrement this field.
424102adabSPaul E. McKenney  *
434102adabSPaul E. McKenney  * The rest of the bits could in principle be used to count interrupts,
444102adabSPaul E. McKenney  * but this would mean that a negative-one value in the interrupt
454102adabSPaul E. McKenney  * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
464102adabSPaul E. McKenney  * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
474102adabSPaul E. McKenney  * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
484102adabSPaul E. McKenney  * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
494102adabSPaul E. McKenney  * initial exit from idle.
504102adabSPaul E. McKenney  */
514102adabSPaul E. McKenney #define DYNTICK_TASK_NEST_WIDTH 7
524102adabSPaul E. McKenney #define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
534102adabSPaul E. McKenney #define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
544102adabSPaul E. McKenney #define DYNTICK_TASK_FLAG	   ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
554102adabSPaul E. McKenney #define DYNTICK_TASK_MASK	   ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
564102adabSPaul E. McKenney #define DYNTICK_TASK_EXIT_IDLE	   (DYNTICK_TASK_NEST_VALUE + \
574102adabSPaul E. McKenney 				    DYNTICK_TASK_FLAG)
584102adabSPaul E. McKenney 
592e8c28c2SPaul E. McKenney 
602e8c28c2SPaul E. McKenney /*
612e8c28c2SPaul E. McKenney  * Grace-period counter management.
622e8c28c2SPaul E. McKenney  */
632e8c28c2SPaul E. McKenney 
64f1ec57a4SPaul E. McKenney #define RCU_SEQ_CTR_SHIFT	2
65031aeee0SPaul E. McKenney #define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)
66031aeee0SPaul E. McKenney 
67031aeee0SPaul E. McKenney /*
68031aeee0SPaul E. McKenney  * Return the counter portion of a sequence number previously returned
69031aeee0SPaul E. McKenney  * by rcu_seq_snap() or rcu_seq_current().
70031aeee0SPaul E. McKenney  */
71031aeee0SPaul E. McKenney static inline unsigned long rcu_seq_ctr(unsigned long s)
72031aeee0SPaul E. McKenney {
73031aeee0SPaul E. McKenney 	return s >> RCU_SEQ_CTR_SHIFT;
74031aeee0SPaul E. McKenney }
75031aeee0SPaul E. McKenney 
76031aeee0SPaul E. McKenney /*
77031aeee0SPaul E. McKenney  * Return the state portion of a sequence number previously returned
78031aeee0SPaul E. McKenney  * by rcu_seq_snap() or rcu_seq_current().
79031aeee0SPaul E. McKenney  */
80031aeee0SPaul E. McKenney static inline int rcu_seq_state(unsigned long s)
81031aeee0SPaul E. McKenney {
82031aeee0SPaul E. McKenney 	return s & RCU_SEQ_STATE_MASK;
83031aeee0SPaul E. McKenney }
84031aeee0SPaul E. McKenney 
8580a7956fSPaul E. McKenney /*
8680a7956fSPaul E. McKenney  * Set the state portion of the pointed-to sequence number.
8780a7956fSPaul E. McKenney  * The caller is responsible for preventing conflicting updates.
8880a7956fSPaul E. McKenney  */
8980a7956fSPaul E. McKenney static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
9080a7956fSPaul E. McKenney {
9180a7956fSPaul E. McKenney 	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
9280a7956fSPaul E. McKenney 	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
9380a7956fSPaul E. McKenney }
9480a7956fSPaul E. McKenney 
/*
 * Adjust sequence number for start of update-side operation.
 * The increment leaves a nonzero state field, marking an update
 * as being in progress; the barrier orders the increment before
 * the update-side work that follows.
 */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);	/* State must now be exactly 1. */
}
1022e8c28c2SPaul E. McKenney 
/*
 * Adjust sequence number for end of update-side operation.
 * The barrier orders the preceding update-side work before the
 * counter advance; ORing in RCU_SEQ_STATE_MASK before the "+ 1"
 * clears the state bits and carries into the counter field.
 */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));	/* Must currently be mid-update. */
	WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
}
1102e8c28c2SPaul E. McKenney 
/* Take a snapshot of the update side's sequence number. */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	/*
	 * Round the current sequence up (clearing the state bits) so that
	 * rcu_seq_done() will not report completion until any update
	 * already in progress at snapshot time has fully completed.
	 */
	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
1202e8c28c2SPaul E. McKenney 
/*
 * Return the current value of the update side's sequence number.
 * No ordering is implied; callers needing ordering must supply it.
 */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	unsigned long cur = READ_ONCE(*sp);

	return cur;
}
1268660b7d8SPaul E. McKenney 
1272e8c28c2SPaul E. McKenney /*
1282e8c28c2SPaul E. McKenney  * Given a snapshot from rcu_seq_snap(), determine whether or not a
1292e8c28c2SPaul E. McKenney  * full update-side operation has occurred.
1302e8c28c2SPaul E. McKenney  */
1312e8c28c2SPaul E. McKenney static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
1322e8c28c2SPaul E. McKenney {
1332e8c28c2SPaul E. McKenney 	return ULONG_CMP_GE(READ_ONCE(*sp), s);
1342e8c28c2SPaul E. McKenney }
1352e8c28c2SPaul E. McKenney 
1364102adabSPaul E. McKenney /*
1374102adabSPaul E. McKenney  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
1384102adabSPaul E. McKenney  * by call_rcu() and rcu callback execution, and are therefore not part of the
1394102adabSPaul E. McKenney  * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
1404102adabSPaul E. McKenney  */
1414102adabSPaul E. McKenney 
1424102adabSPaul E. McKenney #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1434102adabSPaul E. McKenney # define STATE_RCU_HEAD_READY	0
1444102adabSPaul E. McKenney # define STATE_RCU_HEAD_QUEUED	1
1454102adabSPaul E. McKenney 
1464102adabSPaul E. McKenney extern struct debug_obj_descr rcuhead_debug_descr;
1474102adabSPaul E. McKenney 
1484102adabSPaul E. McKenney static inline int debug_rcu_head_queue(struct rcu_head *head)
1494102adabSPaul E. McKenney {
1504102adabSPaul E. McKenney 	int r1;
1514102adabSPaul E. McKenney 
1524102adabSPaul E. McKenney 	r1 = debug_object_activate(head, &rcuhead_debug_descr);
1534102adabSPaul E. McKenney 	debug_object_active_state(head, &rcuhead_debug_descr,
1544102adabSPaul E. McKenney 				  STATE_RCU_HEAD_READY,
1554102adabSPaul E. McKenney 				  STATE_RCU_HEAD_QUEUED);
1564102adabSPaul E. McKenney 	return r1;
1574102adabSPaul E. McKenney }
1584102adabSPaul E. McKenney 
/*
 * Transition @head's debug-objects state from QUEUED back to READY
 * and deactivate it; pairs with debug_rcu_head_queue().
 */
static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
1664102adabSPaul E. McKenney #else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/* With debug-objects disabled, rcu_head queue tracking is a no-op. */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
1754102adabSPaul E. McKenney #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1764102adabSPaul E. McKenney 
177bd73a7f5STeodora Baluta void kfree(const void *);
1784102adabSPaul E. McKenney 
179406e3e53SPaul E. McKenney /*
180406e3e53SPaul E. McKenney  * Reclaim the specified callback, either by invoking it (non-lazy case)
181406e3e53SPaul E. McKenney  * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
182406e3e53SPaul E. McKenney  */
1834102adabSPaul E. McKenney static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
1844102adabSPaul E. McKenney {
1854102adabSPaul E. McKenney 	unsigned long offset = (unsigned long)head->func;
1864102adabSPaul E. McKenney 
18724ef659aSPaul E. McKenney 	rcu_lock_acquire(&rcu_callback_map);
1884102adabSPaul E. McKenney 	if (__is_kfree_rcu_offset(offset)) {
189dffd06a7SPaul E. McKenney 		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
1904102adabSPaul E. McKenney 		kfree((void *)head - offset);
19124ef659aSPaul E. McKenney 		rcu_lock_release(&rcu_callback_map);
192406e3e53SPaul E. McKenney 		return true;
1934102adabSPaul E. McKenney 	} else {
194dffd06a7SPaul E. McKenney 		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
1954102adabSPaul E. McKenney 		head->func(head);
19624ef659aSPaul E. McKenney 		rcu_lock_release(&rcu_callback_map);
197406e3e53SPaul E. McKenney 		return false;
1984102adabSPaul E. McKenney 	}
1994102adabSPaul E. McKenney }
2004102adabSPaul E. McKenney 
2014102adabSPaul E. McKenney #ifdef CONFIG_RCU_STALL_COMMON
2024102adabSPaul E. McKenney 
2034102adabSPaul E. McKenney extern int rcu_cpu_stall_suppress;
2044102adabSPaul E. McKenney int rcu_jiffies_till_stall_check(void);
2054102adabSPaul E. McKenney 
2064102adabSPaul E. McKenney #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
2074102adabSPaul E. McKenney 
2084102adabSPaul E. McKenney /*
2094102adabSPaul E. McKenney  * Strings used in tracepoints need to be exported via the
2104102adabSPaul E. McKenney  * tracing system such that tools like perf and trace-cmd can
2114102adabSPaul E. McKenney  * translate the string address pointers to actual text.
2124102adabSPaul E. McKenney  */
2134102adabSPaul E. McKenney #define TPS(x)  tracepoint_string(x)
2144102adabSPaul E. McKenney 
215b8989b76SPaul E. McKenney /*
216b8989b76SPaul E. McKenney  * Dump the ftrace buffer, but only one time per callsite per boot.
217b8989b76SPaul E. McKenney  */
218b8989b76SPaul E. McKenney #define rcu_ftrace_dump(oops_dump_mode) \
219b8989b76SPaul E. McKenney do { \
220b8989b76SPaul E. McKenney 	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
221b8989b76SPaul E. McKenney 	\
222b8989b76SPaul E. McKenney 	if (!atomic_read(&___rfd_beenhere) && \
223b8989b76SPaul E. McKenney 	    !atomic_xchg(&___rfd_beenhere, 1)) \
224b8989b76SPaul E. McKenney 		ftrace_dump(oops_dump_mode); \
225b8989b76SPaul E. McKenney } while (0)
226b8989b76SPaul E. McKenney 
227aa23c6fbSPranith Kumar void rcu_early_boot_tests(void);
22852d7e48bSPaul E. McKenney void rcu_test_sync_prims(void);
229aa23c6fbSPranith Kumar 
2305f6130faSLai Jiangshan /*
2315f6130faSLai Jiangshan  * This function really isn't for public consumption, but RCU is special in
2325f6130faSLai Jiangshan  * that context switches can allow the state machine to make progress.
2335f6130faSLai Jiangshan  */
2345f6130faSLai Jiangshan extern void resched_cpu(int cpu);
2355f6130faSLai Jiangshan 
2362b34c43cSPaul E. McKenney #if defined(SRCU) || !defined(TINY_RCU)
2372b34c43cSPaul E. McKenney 
2382b34c43cSPaul E. McKenney #include <linux/rcu_node_tree.h>
2392b34c43cSPaul E. McKenney 
2402b34c43cSPaul E. McKenney extern int rcu_num_lvls;
241e95d68d2SPaul E. McKenney extern int num_rcu_lvl[];
2422b34c43cSPaul E. McKenney extern int rcu_num_nodes;
2432b34c43cSPaul E. McKenney static bool rcu_fanout_exact;
2442b34c43cSPaul E. McKenney static int rcu_fanout_leaf;
2452b34c43cSPaul E. McKenney 
2462b34c43cSPaul E. McKenney /*
2472b34c43cSPaul E. McKenney  * Compute the per-level fanout, either using the exact fanout specified
2482b34c43cSPaul E. McKenney  * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
2492b34c43cSPaul E. McKenney  */
2502b34c43cSPaul E. McKenney static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
2512b34c43cSPaul E. McKenney {
2522b34c43cSPaul E. McKenney 	int i;
2532b34c43cSPaul E. McKenney 
2542b34c43cSPaul E. McKenney 	if (rcu_fanout_exact) {
2552b34c43cSPaul E. McKenney 		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
2562b34c43cSPaul E. McKenney 		for (i = rcu_num_lvls - 2; i >= 0; i--)
2572b34c43cSPaul E. McKenney 			levelspread[i] = RCU_FANOUT;
2582b34c43cSPaul E. McKenney 	} else {
2592b34c43cSPaul E. McKenney 		int ccur;
2602b34c43cSPaul E. McKenney 		int cprv;
2612b34c43cSPaul E. McKenney 
2622b34c43cSPaul E. McKenney 		cprv = nr_cpu_ids;
2632b34c43cSPaul E. McKenney 		for (i = rcu_num_lvls - 1; i >= 0; i--) {
2642b34c43cSPaul E. McKenney 			ccur = levelcnt[i];
2652b34c43cSPaul E. McKenney 			levelspread[i] = (cprv + ccur - 1) / ccur;
2662b34c43cSPaul E. McKenney 			cprv = ccur;
2672b34c43cSPaul E. McKenney 		}
2682b34c43cSPaul E. McKenney 	}
2692b34c43cSPaul E. McKenney }
2702b34c43cSPaul E. McKenney 
271efbe451dSPaul E. McKenney /*
272efbe451dSPaul E. McKenney  * Do a full breadth-first scan of the rcu_node structures for the
273efbe451dSPaul E. McKenney  * specified rcu_state structure.
274efbe451dSPaul E. McKenney  */
275efbe451dSPaul E. McKenney #define rcu_for_each_node_breadth_first(rsp, rnp) \
276efbe451dSPaul E. McKenney 	for ((rnp) = &(rsp)->node[0]; \
277efbe451dSPaul E. McKenney 	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
278efbe451dSPaul E. McKenney 
279efbe451dSPaul E. McKenney /*
280efbe451dSPaul E. McKenney  * Do a breadth-first scan of the non-leaf rcu_node structures for the
281efbe451dSPaul E. McKenney  * specified rcu_state structure.  Note that if there is a singleton
282efbe451dSPaul E. McKenney  * rcu_node tree with but one rcu_node structure, this loop is a no-op.
283efbe451dSPaul E. McKenney  */
284efbe451dSPaul E. McKenney #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
285efbe451dSPaul E. McKenney 	for ((rnp) = &(rsp)->node[0]; \
286efbe451dSPaul E. McKenney 	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
287efbe451dSPaul E. McKenney 
288efbe451dSPaul E. McKenney /*
289efbe451dSPaul E. McKenney  * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
290efbe451dSPaul E. McKenney  * structure.  Note that if there is a singleton rcu_node tree with but
291efbe451dSPaul E. McKenney  * one rcu_node structure, this loop -will- visit the rcu_node structure.
292efbe451dSPaul E. McKenney  * It is still a leaf node, even if it is also the root node.
293efbe451dSPaul E. McKenney  */
294efbe451dSPaul E. McKenney #define rcu_for_each_leaf_node(rsp, rnp) \
295efbe451dSPaul E. McKenney 	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
296efbe451dSPaul E. McKenney 	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
297efbe451dSPaul E. McKenney 
298efbe451dSPaul E. McKenney /*
299efbe451dSPaul E. McKenney  * Iterate over all possible CPUs in a leaf RCU node.
300efbe451dSPaul E. McKenney  */
301efbe451dSPaul E. McKenney #define for_each_leaf_node_possible_cpu(rnp, cpu) \
302efbe451dSPaul E. McKenney 	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
303efbe451dSPaul E. McKenney 	     cpu <= rnp->grphi; \
304efbe451dSPaul E. McKenney 	     cpu = cpumask_next((cpu), cpu_possible_mask))
305efbe451dSPaul E. McKenney 
306*83d40bd3SPaul E. McKenney /*
307*83d40bd3SPaul E. McKenney  * Wrappers for the rcu_node::lock acquire and release.
308*83d40bd3SPaul E. McKenney  *
309*83d40bd3SPaul E. McKenney  * Because the rcu_nodes form a tree, the tree traversal locking will observe
310*83d40bd3SPaul E. McKenney  * different lock values, this in turn means that an UNLOCK of one level
311*83d40bd3SPaul E. McKenney  * followed by a LOCK of another level does not imply a full memory barrier;
312*83d40bd3SPaul E. McKenney  * and most importantly transitivity is lost.
313*83d40bd3SPaul E. McKenney  *
314*83d40bd3SPaul E. McKenney  * In order to restore full ordering between tree levels, augment the regular
315*83d40bd3SPaul E. McKenney  * lock acquire functions with smp_mb__after_unlock_lock().
316*83d40bd3SPaul E. McKenney  *
317*83d40bd3SPaul E. McKenney  * As ->lock of struct rcu_node is a __private field, therefore one should use
318*83d40bd3SPaul E. McKenney  * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
319*83d40bd3SPaul E. McKenney  */
320*83d40bd3SPaul E. McKenney #define raw_spin_lock_rcu_node(p)					\
321*83d40bd3SPaul E. McKenney do {									\
322*83d40bd3SPaul E. McKenney 	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
323*83d40bd3SPaul E. McKenney 	smp_mb__after_unlock_lock();					\
324*83d40bd3SPaul E. McKenney } while (0)
325*83d40bd3SPaul E. McKenney 
326*83d40bd3SPaul E. McKenney #define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))
327*83d40bd3SPaul E. McKenney 
328*83d40bd3SPaul E. McKenney #define raw_spin_lock_irq_rcu_node(p)					\
329*83d40bd3SPaul E. McKenney do {									\
330*83d40bd3SPaul E. McKenney 	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
331*83d40bd3SPaul E. McKenney 	smp_mb__after_unlock_lock();					\
332*83d40bd3SPaul E. McKenney } while (0)
333*83d40bd3SPaul E. McKenney 
334*83d40bd3SPaul E. McKenney #define raw_spin_unlock_irq_rcu_node(p)					\
335*83d40bd3SPaul E. McKenney 	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
336*83d40bd3SPaul E. McKenney 
337*83d40bd3SPaul E. McKenney #define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
338*83d40bd3SPaul E. McKenney do {									\
339*83d40bd3SPaul E. McKenney 	typecheck(unsigned long, flags);				\
340*83d40bd3SPaul E. McKenney 	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
341*83d40bd3SPaul E. McKenney 	smp_mb__after_unlock_lock();					\
342*83d40bd3SPaul E. McKenney } while (0)
343*83d40bd3SPaul E. McKenney 
344*83d40bd3SPaul E. McKenney #define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
345*83d40bd3SPaul E. McKenney do {									\
346*83d40bd3SPaul E. McKenney 	typecheck(unsigned long, flags);				\
347*83d40bd3SPaul E. McKenney 	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
348*83d40bd3SPaul E. McKenney } while (0)
349*83d40bd3SPaul E. McKenney 
350*83d40bd3SPaul E. McKenney #define raw_spin_trylock_rcu_node(p)					\
351*83d40bd3SPaul E. McKenney ({									\
352*83d40bd3SPaul E. McKenney 	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
353*83d40bd3SPaul E. McKenney 									\
354*83d40bd3SPaul E. McKenney 	if (___locked)							\
355*83d40bd3SPaul E. McKenney 		smp_mb__after_unlock_lock();				\
356*83d40bd3SPaul E. McKenney 	___locked;							\
357*83d40bd3SPaul E. McKenney })
358*83d40bd3SPaul E. McKenney 
3592b34c43cSPaul E. McKenney #endif /* #if defined(SRCU) || !defined(TINY_RCU) */
3602b34c43cSPaul E. McKenney 
36125c36329SPaul E. McKenney #ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
{
	return true;	/* Tiny RCU grace periods are always "normal"... */
}
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
	return false;	/* ...and never expedited. */
}

/* Expediting requests are therefore no-ops. */
static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
37925c36329SPaul E. McKenney #else /* #ifdef CONFIG_TINY_RCU */
38025c36329SPaul E. McKenney bool rcu_gp_is_normal(void);     /* Internal RCU use. */
38125c36329SPaul E. McKenney bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
38225c36329SPaul E. McKenney void rcu_expedite_gp(void);
38325c36329SPaul E. McKenney void rcu_unexpedite_gp(void);
38425c36329SPaul E. McKenney void rcupdate_announce_bootup_oddness(void);
38525c36329SPaul E. McKenney #endif /* #else #ifdef CONFIG_TINY_RCU */
38625c36329SPaul E. McKenney 
38782118249SPaul E. McKenney #define RCU_SCHEDULER_INACTIVE	0
38882118249SPaul E. McKenney #define RCU_SCHEDULER_INIT	1
38982118249SPaul E. McKenney #define RCU_SCHEDULER_RUNNING	2
39082118249SPaul E. McKenney 
391fe21a27eSPaul E. McKenney #ifdef CONFIG_TINY_RCU
392fe21a27eSPaul E. McKenney static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
393fe21a27eSPaul E. McKenney #else /* #ifdef CONFIG_TINY_RCU */
394fe21a27eSPaul E. McKenney void rcu_request_urgent_qs_task(struct task_struct *t);
395fe21a27eSPaul E. McKenney #endif /* #else #ifdef CONFIG_TINY_RCU */
396fe21a27eSPaul E. McKenney 
/* RCU flavors that rcutorture can exercise. */
enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR	/* Sentinel for unrecognized torture types. */
};
405cad7b389SPaul E. McKenney 
406cad7b389SPaul E. McKenney #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
407cad7b389SPaul E. McKenney void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
408cad7b389SPaul E. McKenney 			    unsigned long *gpnum, unsigned long *completed);
409cad7b389SPaul E. McKenney void rcutorture_record_test_transition(void);
410cad7b389SPaul E. McKenney void rcutorture_record_progress(unsigned long vernum);
411cad7b389SPaul E. McKenney void do_trace_rcu_torture_read(const char *rcutorturename,
412cad7b389SPaul E. McKenney 			       struct rcu_head *rhp,
413cad7b389SPaul E. McKenney 			       unsigned long secs,
414cad7b389SPaul E. McKenney 			       unsigned long c_old,
415cad7b389SPaul E. McKenney 			       unsigned long c);
416cad7b389SPaul E. McKenney #else
/*
 * Without Tree or Preemptible RCU there is no grace-period state to
 * report, so hand back zeroes.
 */
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
/* Test-transition and progress recording are likewise no-ops. */
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
432cad7b389SPaul E. McKenney #ifdef CONFIG_RCU_TRACE
433cad7b389SPaul E. McKenney void do_trace_rcu_torture_read(const char *rcutorturename,
434cad7b389SPaul E. McKenney 			       struct rcu_head *rhp,
435cad7b389SPaul E. McKenney 			       unsigned long secs,
436cad7b389SPaul E. McKenney 			       unsigned long c_old,
437cad7b389SPaul E. McKenney 			       unsigned long c);
438cad7b389SPaul E. McKenney #else
439cad7b389SPaul E. McKenney #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
440cad7b389SPaul E. McKenney 	do { } while (0)
441cad7b389SPaul E. McKenney #endif
442cad7b389SPaul E. McKenney #endif
443cad7b389SPaul E. McKenney 
444cad7b389SPaul E. McKenney #ifdef CONFIG_TINY_SRCU
445cad7b389SPaul E. McKenney 
/*
 * Report Tiny SRCU's notion of grace-period state to rcutorture.
 * Only ->srcu_idx is read here, so both ->gpnum and ->completed are
 * derived from that single counter.
 */
static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gpnum,
					   unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;	/* Only SRCU state lives here. */
	*flags = 0;
	*completed = sp->srcu_idx;
	*gpnum = *completed;
}
457cad7b389SPaul E. McKenney 
458cad7b389SPaul E. McKenney #elif defined(CONFIG_TREE_SRCU)
459cad7b389SPaul E. McKenney 
460cad7b389SPaul E. McKenney void srcutorture_get_gp_data(enum rcutorture_type test_type,
461cad7b389SPaul E. McKenney 			     struct srcu_struct *sp, int *flags,
462cad7b389SPaul E. McKenney 			     unsigned long *gpnum, unsigned long *completed);
463cad7b389SPaul E. McKenney 
464cad7b389SPaul E. McKenney #elif defined(CONFIG_CLASSIC_SRCU)
465cad7b389SPaul E. McKenney 
466cad7b389SPaul E. McKenney static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
467cad7b389SPaul E. McKenney 					   struct srcu_struct *sp, int *flags,
468cad7b389SPaul E. McKenney 					   unsigned long *gpnum,
469cad7b389SPaul E. McKenney 					   unsigned long *completed)
470cad7b389SPaul E. McKenney {
471cad7b389SPaul E. McKenney 	if (test_type != SRCU_FLAVOR)
472cad7b389SPaul E. McKenney 		return;
473cad7b389SPaul E. McKenney 	*flags = 0;
474cad7b389SPaul E. McKenney 	*completed = sp->completed;
475cad7b389SPaul E. McKenney 	*gpnum = *completed;
476cad7b389SPaul E. McKenney 	if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check0.head)
477cad7b389SPaul E. McKenney 		(*gpnum)++;
478cad7b389SPaul E. McKenney }
479cad7b389SPaul E. McKenney 
480cad7b389SPaul E. McKenney #endif
481cad7b389SPaul E. McKenney 
482e3c8d51eSPaul E. McKenney #ifdef CONFIG_TINY_RCU
483e3c8d51eSPaul E. McKenney 
484e3c8d51eSPaul E. McKenney /*
485e3c8d51eSPaul E. McKenney  * Return the number of grace periods started.
486e3c8d51eSPaul E. McKenney  */
/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of SRCU grace periods completed.  Tiny RCU keeps
 * no such count, so always report zero.
 */
static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return 0;
}

/* Tiny RCU has nothing to force: quiescent states are implicit. */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

/* No grace-period kthreads exist under Tiny RCU, so nothing to show. */
static inline void show_rcu_gp_kthreads(void)
{
}
568e3c8d51eSPaul E. McKenney 
569e3c8d51eSPaul E. McKenney #else /* #ifdef CONFIG_TINY_RCU */
570e3c8d51eSPaul E. McKenney extern unsigned long rcutorture_testseq;
571e3c8d51eSPaul E. McKenney extern unsigned long rcutorture_vernum;
572e3c8d51eSPaul E. McKenney unsigned long rcu_batches_started(void);
573e3c8d51eSPaul E. McKenney unsigned long rcu_batches_started_bh(void);
574e3c8d51eSPaul E. McKenney unsigned long rcu_batches_started_sched(void);
575e3c8d51eSPaul E. McKenney unsigned long rcu_batches_completed(void);
576e3c8d51eSPaul E. McKenney unsigned long rcu_batches_completed_bh(void);
577e3c8d51eSPaul E. McKenney unsigned long rcu_batches_completed_sched(void);
578e3c8d51eSPaul E. McKenney unsigned long rcu_exp_batches_completed(void);
579e3c8d51eSPaul E. McKenney unsigned long rcu_exp_batches_completed_sched(void);
5805a0465e1SPaul E. McKenney unsigned long srcu_batches_completed(struct srcu_struct *sp);
581e3c8d51eSPaul E. McKenney void show_rcu_gp_kthreads(void);
582e3c8d51eSPaul E. McKenney void rcu_force_quiescent_state(void);
583e3c8d51eSPaul E. McKenney void rcu_bh_force_quiescent_state(void);
584e3c8d51eSPaul E. McKenney void rcu_sched_force_quiescent_state(void);
585e3c8d51eSPaul E. McKenney #endif /* #else #ifdef CONFIG_TINY_RCU */
586e3c8d51eSPaul E. McKenney 
5873d54f798SPaul E. McKenney #if defined(CONFIG_RCU_NOCB_CPU_ALL)
5883d54f798SPaul E. McKenney static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
5893d54f798SPaul E. McKenney #elif defined(CONFIG_RCU_NOCB_CPU)
5903d54f798SPaul E. McKenney bool rcu_is_nocb_cpu(int cpu);
5913d54f798SPaul E. McKenney #else
5923d54f798SPaul E. McKenney static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
5933d54f798SPaul E. McKenney #endif
5943d54f798SPaul E. McKenney 
5954102adabSPaul E. McKenney #endif /* __LINUX_RCU_H */
596