/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
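
/*
 * Editor's usage note: as seen later in this file, a tracing statement
 * is written with its semicolon inside the parentheses, so that the
 * whole statement disappears when CONFIG_RCU_TRACE=n:
 *
 *	RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
 */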

/*
 * Process-level increment to ->dynticks_nesting field.  This allows for
 * architectures that use half-interrupts and half-exceptions from
 * process context.
 *
 * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
 * that counts the number of process-based reasons why RCU cannot
 * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
 * is the value used to increment or decrement this field.
 *
 * The rest of the bits could in principle be used to count interrupts,
 * but this would mean that a negative-one value in the interrupt
 * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
 * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
 * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
 * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
 * initial exit from idle.
 */
#define DYNTICK_TASK_NEST_WIDTH 7
#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
#define DYNTICK_TASK_NEST_MASK  (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
#define DYNTICK_TASK_FLAG	   ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
#define DYNTICK_TASK_MASK	   ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
#define DYNTICK_TASK_EXIT_IDLE	   (DYNTICK_TASK_NEST_VALUE + \
				    DYNTICK_TASK_FLAG)
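
/*
 * Worked example (editor's note, assuming a 64-bit long long, so that
 * LLONG_MAX == 2^63 - 1):
 *
 *	DYNTICK_TASK_NEST_VALUE == 2^56 (nesting-count increment)
 *	DYNTICK_TASK_NEST_MASK  == bits 62..56 (7-bit nesting field)
 *	DYNTICK_TASK_FLAG       == 2^54
 *	DYNTICK_TASK_MASK       == bits 54..53 (two-bit guard field)
 *	DYNTICK_TASK_EXIT_IDLE  == 2^56 + 2^54
 *
 * A negative-one value in the low-order interrupt bits thus borrows
 * into the guard field rather than zeroing out DYNTICK_TASK_NEST_MASK.
 */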

/*
 * Grace-period counter management.
 */

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(!(*sp & 0x1));
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!(*sp & 0x1));
	WRITE_ONCE(*sp, *sp + 1);
}

/*
 * Take a snapshot of the update side's sequence number.  The value
 * returned is the counter value that, once reached or exceeded, implies
 * that a full update-side operation has elapsed since the snapshot.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 3) & ~0x1;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
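
/*
 * Editor's usage sketch (hypothetical caller, not part of this header):
 * the update side brackets its operation with rcu_seq_start() and
 * rcu_seq_end(), while a waiter snapshots the counter and then polls:
 *
 *	unsigned long s;
 *
 *	s = rcu_seq_snap(&my_seq);
 *	while (!rcu_seq_done(&my_seq, s))
 *		...block or poll...
 *
 * Because rcu_seq_snap() rounds up past any operation already in
 * flight, the waiter cannot be satisfied by an update that started
 * before the snapshot was taken.
 */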

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  They live here in rcu.h because they are used by all
 * RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
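
/*
 * Editor's usage sketch (hypothetical call sites): a queuing path marks
 * the rcu_head active before enqueuing it, and the invocation path
 * unmarks it just before running the callback:
 *
 *	debug_rcu_head_queue(head);	(nonzero return: already queued)
 *	...enqueue head on the callback list...
 *
 *	debug_rcu_head_unqueue(head);
 *	head->func(head);
 *
 * A double call_rcu() on the same rcu_head is thus flagged when
 * CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, and costs nothing otherwise.
 */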

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
		head->func(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
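
/*
 * Editor's note on the kfree special case above: kfree_rcu() stores no
 * real function pointer.  It instead encodes the offset of the rcu_head
 * within its enclosing structure as ->func, and such small "addresses"
 * are recognized by __is_kfree_rcu_offset().  Conceptually:
 *
 *	struct foo {
 *		...payload...
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(p, rcu) sets p->rcu.func to offsetof(struct foo, rcu),
 *
 * so that "(void *)head - offset" above recovers the original pointer
 * that was handed out by kmalloc(), ready for kfree().
 */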

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)
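
/*
 * For example (editor's illustration of a typical RCU tracepoint call
 * site; the event name and arguments are indicative only):
 *
 *	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("newreq"));
 *
 * Without the wrapper, tools would see only a raw kernel address where
 * the string "newreq" should appear.
 */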

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
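
/*
 * Worked example (editor's note, hypothetical configuration): with
 * nr_cpu_ids == 96 and a leaf fanout of 16, the balanced (default) case
 * sees levelcnt[] == { 1, 6 } (one root, six leaves) and computes:
 *
 *	levelspread[1] = (96 + 6 - 1) / 6 = 16  (CPUs per leaf)
 *	levelspread[0] = (6 + 1 - 1) / 1 = 6    (leaves under the root)
 *
 * The rcu_fanout_exact case instead forces RCU_FANOUT at every non-leaf
 * level and rcu_fanout_leaf at the leaves, however unbalanced the result.
 */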

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
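
/*
 * Editor's usage sketch (hypothetical loop body): initialization and
 * grace-period code walks the tree with these macros, for example:
 *
 *	struct rcu_node *rnp;
 *
 *	rcu_for_each_node_breadth_first(rsp, rnp) {
 *		...visit *rnp, parents before children...
 *	}
 */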

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
	     cpu <= rnp->grphi; \
	     cpu = cpumask_next((cpu), cpu_possible_mask))
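
/*
 * Editor's usage sketch (hypothetical): nesting this macro inside
 * rcu_for_each_leaf_node() visits every possible CPU exactly once:
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			...per-CPU processing for this leaf...
 */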

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#endif /* __LINUX_RCU_H */