/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <linux/slab.h>
#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 *
 * The two least significant bits contain the control flags.
 * The most significant bits contain the grace-period sequence counter.
 *
 * When both control flags are zero, no grace period is in progress.
 * When either bit is non-zero, a grace period has started and is in
 * progress.  When the grace period completes, the control flags are reset
 * to 0 and the grace-period sequence counter is incremented.
 *
 * However, some specific RCU usages make use of custom values.
 *
 * SRCU special control values:
 *
 *	SRCU_SNP_INIT_SEQ	:	Invalid/init value set when SRCU node
 *					is initialized.
 *
 *	SRCU_STATE_IDLE		:	No SRCU gp is in progress.
 *
 *	SRCU_STATE_SCAN1	:	State set by rcu_seq_start().  Indicates
 *					we are scanning the readers on the slot
 *					defined as inactive (there might well
 *					be pending readers that will use that
 *					index, but their number is bounded).
 *
 *	SRCU_STATE_SCAN2	:	State set manually via rcu_seq_set_state().
 *					Indicates we are flipping the readers
 *					index and then scanning the readers on the
 *					slot newly designated as inactive (again,
 *					the number of pending readers that will use
 *					this inactive index is bounded).
 *
 * RCU polled GP special control value:
 *
 *	RCU_GET_STATE_COMPLETED	:	State value indicating that the polled
 *					GP has already completed.  This value
 *					covers both the state and the counter
 *					of the grace-period sequence number.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)
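/*
 * As an illustrative (non-normative) example of the layout: with
 * RCU_SEQ_CTR_SHIFT == 2, the value 0x0d decomposes into a sequence
 * counter of 0x0d >> 2 == 3 and a state of 0x0d & 0x3 == 1, that is,
 * grace period number 3 is in progress.  A value of 0x0c (state 0)
 * would instead mean that grace period 3 has completed and no new
 * grace period has started.
 */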

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED	0x1

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
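/*
 * Illustrative (non-normative) lifecycle of a grace-period sequence
 * number under the above helpers, again assuming RCU_SEQ_CTR_SHIFT == 2:
 *
 *	unsigned long gp_seq = 0x8;	// GP 2 idle (state bits zero).
 *	rcu_seq_start(&gp_seq);		// gp_seq == 0x9: GP 3 in progress.
 *	rcu_seq_end(&gp_seq);		// gp_seq == 0xc: GP 3 complete, idle.
 *
 * Note that rcu_seq_end() rounds up to the next value whose state bits
 * are all zero, so any intermediate state set via rcu_seq_set_state()
 * is cleared as well.
 */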

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
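/*
 * Worked example (illustrative only): with RCU_SEQ_STATE_MASK == 0x3,
 * the expression above is (*sp + 0x7) & ~0x3.  If *sp == 0x8 (GP 2
 * idle), rcu_seq_snap() returns 0xc: one full grace period (GP 3) must
 * start and end.  If *sp == 0x9 (GP 3 already in progress), it returns
 * 0x10: the in-progress GP 3 might have started before the caller's
 * accesses, so a further full grace period (GP 4) is also required.
 */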

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
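/*
 * Illustrative polled-wait usage of the snap/done pair (a sketch given
 * some caller-provided update-side counter gp_seq, not a kernel API;
 * the real polled interfaces are get_state_synchronize_rcu() and
 * friends):
 *
 *	unsigned long s = rcu_seq_snap(&gp_seq);
 *
 *	// ... arrange for the needed grace period(s) to elapse ...
 *
 *	while (!rcu_seq_done(&gp_seq, s))
 *		schedule_timeout_uninterruptible(1);
 */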

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
	unsigned long cur_s = READ_ONCE(*sp);

	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}
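/*
 * Illustrative reading of the arithmetic above: cur_s is treated as
 * "not done" only while it lies within two grace periods below s
 * (with RCU_SEQ_STATE_MASK == 0x3, that guard distance is
 * 2 * 0x3 + 1 == 0x7).  Anything further below s is assumed to result
 * from counter wrap, and thus to correspond to a long-completed grace
 * period, rather than being given the usual (ULONG_MAX / 2) guard band.
 */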

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
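/*
 * Worked example (illustrative only, RCU_SEQ_STATE_MASK == 0x3): for
 * old == 0x5 (GP 1 in progress) and new == 0x10 (GP 4 idle):
 *
 *	rnd_diff = 0x10 - ((0x5 + 0x3) & ~0x3) + 1 = 0x10 - 0x8 + 1 = 9
 *
 * which is larger than the state mask, so the result is
 * ((9 - 0x3 - 1) >> 2) + 2 == 3: roughly three full grace periods
 * (the remainder of GP 1, plus GP 2 and GP 3) elapsed in between.
 */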

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  These are defined here in rcu.h because they are
 * used by all RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/* Dump diagnostic state for a callback whose function pointer was lost. */
static inline void debug_rcu_head_callback(struct rcu_head *rhp)
{
	if (unlikely(!rhp->func))
		kmem_dump_obj(rhp);
}

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
extern int rcu_cpu_stall_cputime;
extern bool rcu_exp_stall_task_details __read_mostly;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)
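/*
 * The value 3 above acts as a sentinel: the suppress macro sets it only
 * when stall warnings are not already suppressed, and the unsuppress
 * macro restores 0 only if the value is still 3.  This way an ftrace
 * dump never clobbers a suppression requested by other means (for
 * example, via the rcu_cpu_stall_suppress kernel parameter).
 */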

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
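/*
 * Worked example of the balanced case (illustrative values only):
 * with nr_cpu_ids == 64, a two-level tree, and levelcnt == {1, 4}
 * (one root, four leaves), the loop computes levelspread[1] ==
 * DIV_ROUND_UP(64, 4) == 16 CPUs per leaf and levelspread[0] ==
 * DIV_ROUND_UP(4, 1) == 4 leaves under the root.
 */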

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define _rcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	_rcu_for_each_node_breadth_first(&rcu_state, rnp)
#define srcu_for_each_node_breadth_first(ssp, rnp) \
	_rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
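/*
 * Illustrative (non-normative) combined usage of the iterators above,
 * visiting every possible CPU by way of its leaf rcu_node structure:
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp) {
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			do_something_with(cpu);	// hypothetical helper
 *	}
 */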

#endif /* !defined(CONFIG_TINY_RCU) */

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree-traversal locking will observe
 * different lock values.  This in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier;
 * most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock-acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irq_rcu_node(p)					\
do {									\
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags);	\
} while (0)

#define raw_spin_trylock_rcu_node(p)					\
({									\
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define raw_lockdep_assert_held_rcu_node(p)				\
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
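/*
 * Illustrative (non-normative) use of the wrappers above, keeping the
 * required full ordering while excluding interrupts:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ... examine or update rnp while holding its ->lock ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */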

#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline bool rcu_cpu_online(int cpu) { return true; }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);	/* Internal RCU use. */
bool rcu_gp_is_expedited(void);	/* Internal RCU use. */
bool rcu_async_should_hurry(void);	/* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
bool rcu_cpu_online(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_TASKS_RCU
struct task_struct *get_rcu_tasks_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU
struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RUDE_RCU

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_lazy_get_jiffies_till_flush(void);
void rcu_lazy_set_jiffies_till_flush(unsigned long j);
#else
static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
#else
bool rcu_cpu_beenfullyonline(int cpu);
#endif

#endif /* __LINUX_RCU_H */