xref: /openbmc/linux/kernel/rcu/rcu.h (revision efbe451d46af62369226e42b98dbcd95b6940a63)
14102adabSPaul E. McKenney /*
24102adabSPaul E. McKenney  * Read-Copy Update definitions shared among RCU implementations.
34102adabSPaul E. McKenney  *
44102adabSPaul E. McKenney  * This program is free software; you can redistribute it and/or modify
54102adabSPaul E. McKenney  * it under the terms of the GNU General Public License as published by
64102adabSPaul E. McKenney  * the Free Software Foundation; either version 2 of the License, or
74102adabSPaul E. McKenney  * (at your option) any later version.
84102adabSPaul E. McKenney  *
94102adabSPaul E. McKenney  * This program is distributed in the hope that it will be useful,
104102adabSPaul E. McKenney  * but WITHOUT ANY WARRANTY; without even the implied warranty of
114102adabSPaul E. McKenney  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
124102adabSPaul E. McKenney  * GNU General Public License for more details.
134102adabSPaul E. McKenney  *
144102adabSPaul E. McKenney  * You should have received a copy of the GNU General Public License
1587de1cfdSPaul E. McKenney  * along with this program; if not, you can access it online at
1687de1cfdSPaul E. McKenney  * http://www.gnu.org/licenses/gpl-2.0.html.
174102adabSPaul E. McKenney  *
184102adabSPaul E. McKenney  * Copyright IBM Corporation, 2011
194102adabSPaul E. McKenney  *
204102adabSPaul E. McKenney  * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
214102adabSPaul E. McKenney  */
224102adabSPaul E. McKenney 
234102adabSPaul E. McKenney #ifndef __LINUX_RCU_H
244102adabSPaul E. McKenney #define __LINUX_RCU_H
254102adabSPaul E. McKenney 
265cb5c6e1SPaul Gortmaker #include <trace/events/rcu.h>
/*
 * RCU_TRACE() expands its statement argument only when CONFIG_RCU_TRACE
 * is enabled, so tracing calls compile away to nothing otherwise.
 */
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
324102adabSPaul E. McKenney 
334102adabSPaul E. McKenney /*
344102adabSPaul E. McKenney  * Process-level increment to ->dynticks_nesting field.  This allows for
354102adabSPaul E. McKenney  * architectures that use half-interrupts and half-exceptions from
364102adabSPaul E. McKenney  * process context.
374102adabSPaul E. McKenney  *
384102adabSPaul E. McKenney  * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
394102adabSPaul E. McKenney  * that counts the number of process-based reasons why RCU cannot
404102adabSPaul E. McKenney  * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
414102adabSPaul E. McKenney  * is the value used to increment or decrement this field.
424102adabSPaul E. McKenney  *
434102adabSPaul E. McKenney  * The rest of the bits could in principle be used to count interrupts,
444102adabSPaul E. McKenney  * but this would mean that a negative-one value in the interrupt
454102adabSPaul E. McKenney  * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
464102adabSPaul E. McKenney  * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
474102adabSPaul E. McKenney  * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
484102adabSPaul E. McKenney  * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
494102adabSPaul E. McKenney  * initial exit from idle.
504102adabSPaul E. McKenney  */
/* Width (in bits) of the process-nesting counter field. */
#define DYNTICK_TASK_NEST_WIDTH 7
/* Value of the least-significant bit of the nesting field. */
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
/* Mask covering the topmost DYNTICK_TASK_NEST_WIDTH bits. */
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
/* Guard flag set on initial exit from idle (see block comment above). */
#define DYNTICK_TASK_FLAG	   ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
/* Two-bit guard field containing DYNTICK_TASK_FLAG. */
#define DYNTICK_TASK_MASK	   ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
/* Combined value used upon initial exit from idle: one nest + guard flag. */
#define DYNTICK_TASK_EXIT_IDLE	   (DYNTICK_TASK_NEST_VALUE + \
				    DYNTICK_TASK_FLAG)
584102adabSPaul E. McKenney 
592e8c28c2SPaul E. McKenney 
602e8c28c2SPaul E. McKenney /*
612e8c28c2SPaul E. McKenney  * Grace-period counter management.
622e8c28c2SPaul E. McKenney  */
632e8c28c2SPaul E. McKenney 
/*
 * Adjust sequence number for start of update-side operation.
 *
 * The low-order bit of the counter acts as an "update in progress" flag:
 * odd means an update-side operation is underway, even means idle.  The
 * smp_mb() orders the increment before the update-side operation that
 * follows; the statement order here is load-bearing and must not change.
 */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);	/* Counter becomes odd: update begins. */
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(!(*sp & 0x1));	/* Sanity check: must be odd here. */
}
712e8c28c2SPaul E. McKenney 
/*
 * Adjust sequence number for end of update-side operation.
 *
 * Mirror image of rcu_seq_start(): the smp_mb() orders the update-side
 * operation before the increment that marks it finished, and the counter
 * must be odd (operation in progress) on entry.  Statement order is
 * load-bearing and must not change.
 */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!(*sp & 0x1));	/* Sanity check before marking done. */
	WRITE_ONCE(*sp, *sp + 1);	/* Counter becomes even: update done. */
}
792e8c28c2SPaul E. McKenney 
/*
 * Take a snapshot of the update side's sequence number.
 *
 * The returned value is the first even (idle) counter value that
 * guarantees a full update-side operation has elapsed: for an even
 * (idle) counter this is *sp + 2, for an odd (in-progress) counter it
 * is *sp + 3, i.e. the end of the current operation plus one more.
 * Once rcu_seq_done(sp, s) returns true for this snapshot, a full
 * operation has completed since the snapshot was taken.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 3) & ~0x1;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
892e8c28c2SPaul E. McKenney 
/*
 * Return the current value of the update side's sequence number,
 * with no memory ordering implied (contrast with rcu_seq_snap()).
 */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}
958660b7d8SPaul E. McKenney 
/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 *
 * ULONG_CMP_GE() performs the comparison modulo ULONG_MAX + 1, so the
 * result remains correct even if the counter has wrapped since the
 * snapshot was taken.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
1042e8c28c2SPaul E. McKenney 
1054102adabSPaul E. McKenney /*
1064102adabSPaul E. McKenney  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
1074102adabSPaul E. McKenney  * by call_rcu() and rcu callback execution, and are therefore not part of the
1084102adabSPaul E. McKenney  * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
1094102adabSPaul E. McKenney  */
1104102adabSPaul E. McKenney 
1114102adabSPaul E. McKenney #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
/* debug_objects states for an rcu_head: idle vs. queued awaiting a GP. */
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;
1164102adabSPaul E. McKenney 
1174102adabSPaul E. McKenney static inline int debug_rcu_head_queue(struct rcu_head *head)
1184102adabSPaul E. McKenney {
1194102adabSPaul E. McKenney 	int r1;
1204102adabSPaul E. McKenney 
1214102adabSPaul E. McKenney 	r1 = debug_object_activate(head, &rcuhead_debug_descr);
1224102adabSPaul E. McKenney 	debug_object_active_state(head, &rcuhead_debug_descr,
1234102adabSPaul E. McKenney 				  STATE_RCU_HEAD_READY,
1244102adabSPaul E. McKenney 				  STATE_RCU_HEAD_QUEUED);
1254102adabSPaul E. McKenney 	return r1;
1264102adabSPaul E. McKenney }
1274102adabSPaul E. McKenney 
/*
 * Reverse of debug_rcu_head_queue(): transition the rcu_head from
 * QUEUED back to READY, then deactivate debug_objects tracking.  The
 * state transition must precede the deactivation.
 */
static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
1354102adabSPaul E. McKenney #else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/* No-op stub when CONFIG_DEBUG_OBJECTS_RCU_HEAD is disabled. */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}
1404102adabSPaul E. McKenney 
/* No-op stub when CONFIG_DEBUG_OBJECTS_RCU_HEAD is disabled. */
static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
1444102adabSPaul E. McKenney #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1454102adabSPaul E. McKenney 
146bd73a7f5STeodora Baluta void kfree(const void *);
1474102adabSPaul E. McKenney 
148406e3e53SPaul E. McKenney /*
149406e3e53SPaul E. McKenney  * Reclaim the specified callback, either by invoking it (non-lazy case)
150406e3e53SPaul E. McKenney  * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
151406e3e53SPaul E. McKenney  */
1524102adabSPaul E. McKenney static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
1534102adabSPaul E. McKenney {
1544102adabSPaul E. McKenney 	unsigned long offset = (unsigned long)head->func;
1554102adabSPaul E. McKenney 
15624ef659aSPaul E. McKenney 	rcu_lock_acquire(&rcu_callback_map);
1574102adabSPaul E. McKenney 	if (__is_kfree_rcu_offset(offset)) {
158dffd06a7SPaul E. McKenney 		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
1594102adabSPaul E. McKenney 		kfree((void *)head - offset);
16024ef659aSPaul E. McKenney 		rcu_lock_release(&rcu_callback_map);
161406e3e53SPaul E. McKenney 		return true;
1624102adabSPaul E. McKenney 	} else {
163dffd06a7SPaul E. McKenney 		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
1644102adabSPaul E. McKenney 		head->func(head);
16524ef659aSPaul E. McKenney 		rcu_lock_release(&rcu_callback_map);
166406e3e53SPaul E. McKenney 		return false;
1674102adabSPaul E. McKenney 	}
1684102adabSPaul E. McKenney }
1694102adabSPaul E. McKenney 
1704102adabSPaul E. McKenney #ifdef CONFIG_RCU_STALL_COMMON
1714102adabSPaul E. McKenney 
1724102adabSPaul E. McKenney extern int rcu_cpu_stall_suppress;
1734102adabSPaul E. McKenney int rcu_jiffies_till_stall_check(void);
1744102adabSPaul E. McKenney 
1754102adabSPaul E. McKenney #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
1764102adabSPaul E. McKenney 
/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 *
 * TPS() wraps a string literal so that only its address is emitted
 * into the trace buffer.
 */
#define TPS(x)  tracepoint_string(x)
1834102adabSPaul E. McKenney 
184aa23c6fbSPranith Kumar void rcu_early_boot_tests(void);
18552d7e48bSPaul E. McKenney void rcu_test_sync_prims(void);
186aa23c6fbSPranith Kumar 
1875f6130faSLai Jiangshan /*
1885f6130faSLai Jiangshan  * This function really isn't for public consumption, but RCU is special in
1895f6130faSLai Jiangshan  * that context switches can allow the state machine to make progress.
1905f6130faSLai Jiangshan  */
1915f6130faSLai Jiangshan extern void resched_cpu(int cpu);
1925f6130faSLai Jiangshan 
1932b34c43cSPaul E. McKenney #if defined(SRCU) || !defined(TINY_RCU)
1942b34c43cSPaul E. McKenney 
1952b34c43cSPaul E. McKenney #include <linux/rcu_node_tree.h>
1962b34c43cSPaul E. McKenney 
1972b34c43cSPaul E. McKenney extern int rcu_num_lvls;
1982b34c43cSPaul E. McKenney extern int rcu_num_nodes;
1992b34c43cSPaul E. McKenney static bool rcu_fanout_exact;
2002b34c43cSPaul E. McKenney static int rcu_fanout_leaf;
2012b34c43cSPaul E. McKenney 
2022b34c43cSPaul E. McKenney /*
2032b34c43cSPaul E. McKenney  * Compute the per-level fanout, either using the exact fanout specified
2042b34c43cSPaul E. McKenney  * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
2052b34c43cSPaul E. McKenney  */
2062b34c43cSPaul E. McKenney static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
2072b34c43cSPaul E. McKenney {
2082b34c43cSPaul E. McKenney 	int i;
2092b34c43cSPaul E. McKenney 
2102b34c43cSPaul E. McKenney 	if (rcu_fanout_exact) {
2112b34c43cSPaul E. McKenney 		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
2122b34c43cSPaul E. McKenney 		for (i = rcu_num_lvls - 2; i >= 0; i--)
2132b34c43cSPaul E. McKenney 			levelspread[i] = RCU_FANOUT;
2142b34c43cSPaul E. McKenney 	} else {
2152b34c43cSPaul E. McKenney 		int ccur;
2162b34c43cSPaul E. McKenney 		int cprv;
2172b34c43cSPaul E. McKenney 
2182b34c43cSPaul E. McKenney 		cprv = nr_cpu_ids;
2192b34c43cSPaul E. McKenney 		for (i = rcu_num_lvls - 1; i >= 0; i--) {
2202b34c43cSPaul E. McKenney 			ccur = levelcnt[i];
2212b34c43cSPaul E. McKenney 			levelspread[i] = (cprv + ccur - 1) / ccur;
2222b34c43cSPaul E. McKenney 			cprv = ccur;
2232b34c43cSPaul E. McKenney 		}
2242b34c43cSPaul E. McKenney 	}
2252b34c43cSPaul E. McKenney }
2262b34c43cSPaul E. McKenney 
/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.  The ->node[] array is laid out in
 * breadth-first order, so a linear walk over it visits the root first,
 * then each successive level.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
234*efbe451dSPaul E. McKenney 
/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  ->level[rcu_num_lvls - 1] points at
 * the first leaf, so stopping there excludes all leaves.  Note that if
 * there is a singleton rcu_node tree with but one rcu_node structure,
 * this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
243*efbe451dSPaul E. McKenney 
/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure, starting at ->level[rcu_num_lvls - 1] (the first leaf) and
 * running to the end of the ->node[] array.  Note that if there is a
 * singleton rcu_node tree with but one rcu_node structure, this loop
 * -will- visit the rcu_node structure.  It is still a leaf node, even
 * if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
253*efbe451dSPaul E. McKenney 
/*
 * Iterate over all possible CPUs in a leaf RCU node.
 *
 * The leaf covers the CPU range [rnp->grplo, rnp->grphi]; cpumask_next()
 * skips CPUs in that range that are not possible.  Every macro parameter
 * is parenthesized at each expansion so that callers may pass non-trivial
 * expressions without precedence surprises.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= (rnp)->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))
261*efbe451dSPaul E. McKenney 
2622b34c43cSPaul E. McKenney #endif /* #if defined(SRCU) || !defined(TINY_RCU) */
2632b34c43cSPaul E. McKenney 
2644102adabSPaul E. McKenney #endif /* __LINUX_RCU_H */
265