xref: /openbmc/linux/kernel/rcu/rcu.h (revision 2464dd940e23bad227c387a40eec99f7aa02ed96)
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Process-level increment to ->dynticks_nesting field.  This allows for
 * architectures that use half-interrupts and half-exceptions from
 * process context.
 *
 * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
 * that counts the number of process-based reasons why RCU cannot
 * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
 * is the value used to increment or decrement this field.
 *
 * The rest of the bits could in principle be used to count interrupts,
 * but this would mean that a negative-one value in the interrupt
 * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
 * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
 * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
 * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
 * initial exit from idle.
 */
#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
#define DYNTICK_TASK_FLAG	   ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
#define DYNTICK_TASK_MASK	   ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
#define DYNTICK_TASK_EXIT_IDLE	   (DYNTICK_TASK_NEST_VALUE + \
				    DYNTICK_TASK_FLAG)
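
/*
 * Editor's illustration (not part of the kernel header): a minimal
 * user-space sketch that evaluates the DYNTICK_TASK_* constants above,
 * assuming a 64-bit long long.  The values in the comments are what
 * the expressions work out to in that case.
 */
#if 0
#include <limits.h>
#include <stdio.h>

int main(void)
{
	long long nest_value = (LLONG_MAX >> 7) + 1;	   /* 0x0100000000000000: one task-level increment */
	long long nest_mask  = LLONG_MAX - nest_value + 1; /* 0x7f00000000000000: the 7-bit task-nesting field */
	long long task_flag  = (nest_value / 8) * 2;	   /* 0x0040000000000000: guard flag */
	long long task_mask  = (nest_value / 8) * 3;	   /* 0x0060000000000000: 2-bit guard field */
	long long exit_idle  = nest_value + task_flag;	   /* 0x0140000000000000: value on first exit from idle */

	printf("%llx %llx %llx %llx %llx\n",
	       nest_value, nest_mask, task_flag, task_mask, exit_idle);
	return 0;
}
#endif /* illustrative sketch only */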


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}
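
/*
 * Editor's illustration (not part of the kernel header): how a sequence
 * value packs an operation counter into the upper bits and a phase state
 * into the low RCU_SEQ_CTR_SHIFT (2) bits.  For example, roughly,
 * s == (7 << 2) | 1 means "seven update-side operations completed, one
 * now in flight".
 */
#if 0
static void rcu_seq_packing_demo(void)
{
	unsigned long s = (7UL << RCU_SEQ_CTR_SHIFT) | 1;

	WARN_ON(rcu_seq_ctr(s) != 7);	/* counter portion: upper bits */
	WARN_ON(rcu_seq_state(s) != 1);	/* state portion: low two bits */

	rcu_seq_set_state(&s, 2);	/* replaces only the state bits */
	WARN_ON(s != ((7UL << RCU_SEQ_CTR_SHIFT) | 2));
}
#endif /* illustrative sketch only */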

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
}

/* Take a snapshot of the update side's sequence number. */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
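
/*
 * Editor's illustration (not part of the kernel header): the usual
 * pattern built from the helpers above.  A waiter snapshots the
 * sequence it must wait for with rcu_seq_snap(), which returns the
 * earliest value guaranteeing that a full operation has elapsed after
 * the call (whether or not one was already in flight); the update side
 * brackets its work with rcu_seq_start()/rcu_seq_end(); the waiter
 * polls with rcu_seq_done().  Hypothetical names; real users such as
 * expedited grace periods and SRCU differ in the details.
 */
#if 0
static unsigned long demo_seq;

static unsigned long demo_wait_begin(void)
{
	return rcu_seq_snap(&demo_seq);	/* sequence implying a full operation */
}

static bool demo_wait_completed(unsigned long s)
{
	return rcu_seq_done(&demo_seq, s);
}

static void demo_do_operation(void)
{
	rcu_seq_start(&demo_seq);	/* state becomes nonzero: operation in flight */
	/* ... perform the update-side operation ... */
	rcu_seq_end(&demo_seq);		/* state cleared, counter advanced */
}
#endif /* illustrative sketch only */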

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of
 * the RCU API.  They live in this shared header because they are used by
 * all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
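
/*
 * Editor's illustration (not part of the kernel header): where the
 * debug-objects hooks above sit in a callback's lifecycle.  A real
 * call_rcu()/callback-invocation path does considerably more; this
 * only shows the queue/unqueue pairing around a hypothetical list.
 */
#if 0
static void demo_queue_callback(struct rcu_head *head, rcu_callback_t func)
{
	debug_rcu_head_queue(head);	/* marks the head QUEUED; warns on double queueing */
	head->func = func;
	/* ... link head onto the CPU's callback list ... */
}

static void demo_invoke_callback(struct rcu_head *head)
{
	debug_rcu_head_unqueue(head);	/* back to READY before invocation */
	head->func(head);
}
#endif /* illustrative sketch only */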

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
		head->func(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
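
/*
 * Editor's illustration (not part of the kernel header): a sketch of a
 * callback-drain loop that uses __rcu_reclaim() to tell lazy
 * (kfree-only) callbacks from ones that ran a function.  Real callback
 * processing (e.g. rcu_do_batch()) also handles batching limits and
 * accounting; the counters here are placeholders.
 */
#if 0
static void demo_drain(struct rcu_head *list)
{
	long lazy = 0, nonlazy = 0;

	while (list) {
		struct rcu_head *next = list->next;

		if (__rcu_reclaim("demo", list))
			lazy++;		/* freed directly via kfree() */
		else
			nonlazy++;	/* callback function was invoked */
		list = next;
	}
	/* lazy/nonlazy could feed tracing or accounting here. */
}
#endif /* illustrative sketch only */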

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) \
		ftrace_dump(oops_dump_mode); \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
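
/*
 * Editor's illustration (not part of the kernel header): a user-space
 * rendition of the balanced branch of rcu_init_levelspread() for a
 * hypothetical 96-CPU system with a two-level tree of one root and six
 * leaf rcu_node structures (levelcnt = {1, 6}).  The result is
 * levelspread = {6, 16}: the root fans out to 6 leaves and each leaf
 * covers at most 16 CPUs.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int levelcnt[2] = { 1, 6 };	/* rcu_node structures per level */
	int levelspread[2];
	int nr_cpus = 96;
	int cprv = nr_cpus;
	int i;

	for (i = 1; i >= 0; i--) {
		int ccur = levelcnt[i];

		levelspread[i] = (cprv + ccur - 1) / ccur; /* ceiling division */
		cprv = ccur;
	}
	printf("levelspread = {%d, %d}\n", levelspread[0], levelspread[1]);
	return 0;
}
#endif /* illustrative sketch only */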

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
	     cpu <= rnp->grphi; \
	     cpu = cpumask_next((cpu), cpu_possible_mask))
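
/*
 * Editor's illustration (not part of the kernel header): how the
 * traversal macros above are typically combined -- walk every leaf
 * rcu_node and then every possible CPU covered by that leaf.  The
 * per-CPU work is left as a placeholder.
 */
#if 0
static void demo_scan_leaves(struct rcu_state *rsp)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_for_each_leaf_node(rsp, rnp) {
		/* rnp->grplo .. rnp->grphi bound the CPUs this leaf covers. */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			/* ... examine or update per-CPU state for cpu ... */
		}
	}
}
#endif /* illustrative sketch only */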

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
{
	return true;
}
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
	return false;
}

static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

#ifdef CONFIG_TINY_RCU
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gpnum,
					   unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = sp->srcu_idx;
	*gpnum = *completed;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed);

#elif defined(CONFIG_CLASSIC_SRCU)

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gpnum,
					   unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = sp->completed;
	*gpnum = *completed;
	if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check1.head)
		(*gpnum)++;
}

#endif

#ifdef CONFIG_TINY_RCU

/*
 * Return the number of grace periods started.
 */
static inline unsigned long rcu_batches_started(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods started.
 */
static inline unsigned long rcu_batches_started_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods started.
 */
static inline unsigned long rcu_batches_started_sched(void)
{
	return 0;
}

/*
 * Return the number of grace periods completed.
 */
static inline unsigned long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods completed.
 */
static inline unsigned long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Return the number of sched grace periods completed.
 */
static inline unsigned long rcu_batches_completed_sched(void)
{
	return 0;
}

/*
 * Return the number of expedited grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of expedited sched grace periods completed.
 */
static inline unsigned long rcu_exp_batches_completed_sched(void)
{
	return 0;
}

static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return 0;
}

static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void show_rcu_gp_kthreads(void)
{
}

#else /* #ifdef CONFIG_TINY_RCU */
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
#elif defined(CONFIG_RCU_NOCB_CPU)
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

#endif /* __LINUX_RCU_H */