xref: /openbmc/linux/kernel/rcu/srcutree.c (revision 9a29ad52)
1 /*
2  * Sleepable Read-Copy Update mechanism for mutual exclusion.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, you can access it online at
16  * http://www.gnu.org/licenses/gpl-2.0.html.
17  *
18  * Copyright (C) IBM Corporation, 2006
19  * Copyright (C) Fujitsu, 2012
20  *
21  * Author: Paul McKenney <paulmck@us.ibm.com>
22  *	   Lai Jiangshan <laijs@cn.fujitsu.com>
23  *
24  * For detailed explanation of Read-Copy Update mechanism see -
25  *		Documentation/RCU/ *.txt
26  *
27  */
28 
29 #include <linux/export.h>
30 #include <linux/mutex.h>
31 #include <linux/percpu.h>
32 #include <linux/preempt.h>
33 #include <linux/rcupdate_wait.h>
34 #include <linux/sched.h>
35 #include <linux/smp.h>
36 #include <linux/delay.h>
37 #include <linux/module.h>
38 #include <linux/srcu.h>
39 
40 #include "rcu.h"
41 #include "rcu_segcblist.h"
42 
43 /* Holdoff in nanoseconds for auto-expediting. */
44 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
45 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
46 module_param(exp_holdoff, ulong, 0444);
47 
48 /* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
49 static ulong counter_wrap_check = (ULONG_MAX >> 2);
50 module_param(counter_wrap_check, ulong, 0444);
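
/*
 * Usage sketch: both module parameters above are read-only at run time
 * (permissions 0444), so non-default values are normally supplied on the
 * kernel command line, for example (the numeric values below are purely
 * illustrative):
 *
 *	srcutree.exp_holdoff=50000 srcutree.counter_wrap_check=1073741823
 */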
51 
52 static void srcu_invoke_callbacks(struct work_struct *work);
53 static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
54 static void process_srcu(struct work_struct *work);
55 
56 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
57 #define spin_lock_rcu_node(p)					\
58 do {									\
59 	spin_lock(&ACCESS_PRIVATE(p, lock));			\
60 	smp_mb__after_unlock_lock();					\
61 } while (0)
62 
63 #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
64 
65 #define spin_lock_irq_rcu_node(p)					\
66 do {									\
67 	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
68 	smp_mb__after_unlock_lock();					\
69 } while (0)
70 
71 #define spin_unlock_irq_rcu_node(p)					\
72 	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
73 
74 #define spin_lock_irqsave_rcu_node(p, flags)			\
75 do {									\
76 	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
77 	smp_mb__after_unlock_lock();					\
78 } while (0)
79 
80 #define spin_unlock_irqrestore_rcu_node(p, flags)			\
81 	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
82 
83 /*
84  * Initialize SRCU combining tree.  Note that statically allocated
85  * srcu_struct structures might already have srcu_read_lock() and
86  * srcu_read_unlock() running against them.  So if the is_static parameter
87  * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
88  */
89 static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
90 {
91 	int cpu;
92 	int i;
93 	int level = 0;
94 	int levelspread[RCU_NUM_LVLS];
95 	struct srcu_data *sdp;
96 	struct srcu_node *snp;
97 	struct srcu_node *snp_first;
98 
99 	/* Work out the overall tree geometry. */
100 	sp->level[0] = &sp->node[0];
101 	for (i = 1; i < rcu_num_lvls; i++)
102 		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
103 	rcu_init_levelspread(levelspread, num_rcu_lvl);
104 
105 	/* Each pass through this loop initializes one srcu_node structure. */
106 	rcu_for_each_node_breadth_first(sp, snp) {
107 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
108 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
109 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
110 		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
111 			snp->srcu_have_cbs[i] = 0;
112 			snp->srcu_data_have_cbs[i] = 0;
113 		}
114 		snp->srcu_gp_seq_needed_exp = 0;
115 		snp->grplo = -1;
116 		snp->grphi = -1;
117 		if (snp == &sp->node[0]) {
118 			/* Root node, special case. */
119 			snp->srcu_parent = NULL;
120 			continue;
121 		}
122 
123 		/* Non-root node. */
124 		if (snp == sp->level[level + 1])
125 			level++;
126 		snp->srcu_parent = sp->level[level - 1] +
127 				   (snp - sp->level[level]) /
128 				   levelspread[level - 1];
129 	}
130 
131 	/*
132 	 * Initialize the per-CPU srcu_data array, which feeds into the
133 	 * leaves of the srcu_node tree.
134 	 */
135 	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
136 		     ARRAY_SIZE(sdp->srcu_unlock_count));
137 	level = rcu_num_lvls - 1;
138 	snp_first = sp->level[level];
139 	for_each_possible_cpu(cpu) {
140 		sdp = per_cpu_ptr(sp->sda, cpu);
141 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
142 		rcu_segcblist_init(&sdp->srcu_cblist);
143 		sdp->srcu_cblist_invoking = false;
144 		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
145 		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
146 		sdp->mynode = &snp_first[cpu / levelspread[level]];
147 		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
148 			if (snp->grplo < 0)
149 				snp->grplo = cpu;
150 			snp->grphi = cpu;
151 		}
152 		sdp->cpu = cpu;
153 		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
154 		sdp->sp = sp;
155 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
156 		if (is_static)
157 			continue;
158 
159 		/* Dynamically allocated, better be no srcu_read_locks()! */
160 		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
161 			sdp->srcu_lock_count[i] = 0;
162 			sdp->srcu_unlock_count[i] = 0;
163 		}
164 	}
165 }
166 
167 /*
168  * Initialize non-compile-time initialized fields, including the
169  * associated srcu_node and srcu_data structures.  The is_static
170  * parameter is passed through to init_srcu_struct_nodes(), and
171  * also tells us that ->sda has already been wired up to srcu_data.
172  */
173 static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
174 {
175 	mutex_init(&sp->srcu_cb_mutex);
176 	mutex_init(&sp->srcu_gp_mutex);
177 	sp->srcu_idx = 0;
178 	sp->srcu_gp_seq = 0;
179 	sp->srcu_barrier_seq = 0;
180 	mutex_init(&sp->srcu_barrier_mutex);
181 	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
182 	INIT_DELAYED_WORK(&sp->work, process_srcu);
183 	if (!is_static)
184 		sp->sda = alloc_percpu(struct srcu_data);
185 	init_srcu_struct_nodes(sp, is_static);
186 	sp->srcu_gp_seq_needed_exp = 0;
187 	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
188 	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
189 	return sp->sda ? 0 : -ENOMEM;
190 }
191 
192 #ifdef CONFIG_DEBUG_LOCK_ALLOC
193 
194 int __init_srcu_struct(struct srcu_struct *sp, const char *name,
195 		       struct lock_class_key *key)
196 {
197 	/* Don't re-initialize a lock while it is held. */
198 	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
199 	lockdep_init_map(&sp->dep_map, name, key, 0);
200 	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
201 	return init_srcu_struct_fields(sp, false);
202 }
203 EXPORT_SYMBOL_GPL(__init_srcu_struct);
204 
205 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
206 
207 /**
208  * init_srcu_struct - initialize a sleep-RCU structure
209  * @sp: structure to initialize.
210  *
211  * Must invoke this on a given srcu_struct before passing that srcu_struct
212  * to any other function.  Each srcu_struct represents a separate domain
213  * of SRCU protection.
214  */
215 int init_srcu_struct(struct srcu_struct *sp)
216 {
217 	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
218 	return init_srcu_struct_fields(sp, false);
219 }
220 EXPORT_SYMBOL_GPL(init_srcu_struct);
221 
222 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
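
/*
 * Usage sketch: an SRCU domain is either defined statically with
 * DEFINE_SRCU()/DEFINE_STATIC_SRCU(), which needs no explicit run-time
 * setup, or embedded in another structure and initialized with
 * init_srcu_struct() before first use.  The names my_srcu, my_driver,
 * and my_driver_probe() below are hypothetical:
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	struct my_driver {
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_driver_probe(struct my_driver *d)
 *	{
 *		return init_srcu_struct(&d->srcu);
 *	}
 */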
223 
224 /*
225  * First-use initialization of statically allocated srcu_struct
226  * structure.  Wiring up the combining tree is more than can be
227  * done with compile-time initialization, so this check is added
228  * to each update-side SRCU primitive.  Use sp->lock, which -is-
229  * compile-time initialized, to resolve races involving multiple
230  * CPUs trying to garner first-use privileges.
231  */
232 static void check_init_srcu_struct(struct srcu_struct *sp)
233 {
234 	unsigned long flags;
235 
236 	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
237 	/* The smp_load_acquire() pairs with the smp_store_release(). */
238 	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
239 		return; /* Already initialized. */
240 	spin_lock_irqsave_rcu_node(sp, flags);
241 	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
242 		spin_unlock_irqrestore_rcu_node(sp, flags);
243 		return;
244 	}
245 	init_srcu_struct_fields(sp, true);
246 	spin_unlock_irqrestore_rcu_node(sp, flags);
247 }
248 
249 /*
250  * Returns approximate total of the readers' ->srcu_lock_count[] values
251  * for the rank of per-CPU counters specified by idx.
252  */
253 static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
254 {
255 	int cpu;
256 	unsigned long sum = 0;
257 
258 	for_each_possible_cpu(cpu) {
259 		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
260 
261 		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
262 	}
263 	return sum;
264 }
265 
266 /*
267  * Returns approximate total of the readers' ->srcu_unlock_count[] values
268  * for the rank of per-CPU counters specified by idx.
269  */
270 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
271 {
272 	int cpu;
273 	unsigned long sum = 0;
274 
275 	for_each_possible_cpu(cpu) {
276 		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
277 
278 		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
279 	}
280 	return sum;
281 }
282 
283 /*
284  * Return true if the number of pre-existing readers is determined to
285  * be zero.
286  */
287 static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
288 {
289 	unsigned long unlocks;
290 
291 	unlocks = srcu_readers_unlock_idx(sp, idx);
292 
293 	/*
294 	 * Make sure that a lock is always counted if the corresponding
295 	 * unlock is counted. Needs to be a smp_mb() as the read side may
296 	 * contain a read from a variable that is written to before the
297 	 * synchronize_srcu() in the write side. In this case smp_mb()s
298 	 * A and B act like the store buffering pattern.
299 	 *
300 	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
301 	 * after the synchronize_srcu() from being executed before the
302 	 * grace period ends.
303 	 */
304 	smp_mb(); /* A */
305 
306 	/*
307 	 * If the locks are the same as the unlocks, then there must have
308 	 * been no readers on this index at some time in between. This does
309 	 * not mean that there are no more readers, as one could have read
310 	 * the current index but not have incremented the lock counter yet.
311 	 *
312 	 * So suppose that the updater is preempted here for so long
313 	 * that more than ULONG_MAX non-nested readers come and go in
314 	 * the meantime.  It turns out that this cannot result in overflow
315 	 * because if a reader modifies its unlock count after we read it
316 	 * above, then that reader's next load of ->srcu_idx is guaranteed
317 	 * to get the new value, which will cause it to operate on the
318 	 * other bank of counters, where it cannot contribute to the
319 	 * overflow of these counters.  This means that there is a maximum
320 	 * of 2*NR_CPUS increments, which cannot overflow given current
321 	 * systems, especially not on 64-bit systems.
322 	 *
323 	 * OK, how about nesting?  This does impose a limit on nesting
324 	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
325 	 * especially on 64-bit systems.
326 	 */
327 	return srcu_readers_lock_idx(sp, idx) == unlocks;
328 }
329 
330 /**
331  * srcu_readers_active - returns true if there are readers, and false
332  *                       otherwise
333  * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
334  *
335  * Note that this is not an atomic primitive, and can therefore suffer
336  * severe errors when invoked on an active srcu_struct.  That said, it
337  * can be useful as an error check at cleanup time.
338  */
339 static bool srcu_readers_active(struct srcu_struct *sp)
340 {
341 	int cpu;
342 	unsigned long sum = 0;
343 
344 	for_each_possible_cpu(cpu) {
345 		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
346 
347 		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
348 		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
349 		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
350 		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
351 	}
352 	return sum;
353 }
354 
355 #define SRCU_INTERVAL		1
356 
357 /*
358  * Return grace-period delay, zero if there are expedited grace
359  * periods pending, SRCU_INTERVAL otherwise.
360  */
361 static unsigned long srcu_get_delay(struct srcu_struct *sp)
362 {
363 	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
364 			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
365 		return 0;
366 	return SRCU_INTERVAL;
367 }
368 
369 /* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
370 void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
371 {
372 	int cpu;
373 
374 	if (WARN_ON(!srcu_get_delay(sp)))
375 		return; /* Just leak it! */
376 	if (WARN_ON(srcu_readers_active(sp)))
377 		return; /* Just leak it! */
378 	if (quiesced) {
379 		if (WARN_ON(delayed_work_pending(&sp->work)))
380 			return; /* Just leak it! */
381 	} else {
382 		flush_delayed_work(&sp->work);
383 	}
384 	for_each_possible_cpu(cpu)
385 		if (quiesced) {
386 			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
387 				return; /* Just leak it! */
388 		} else {
389 			flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
390 		}
391 	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
392 	    WARN_ON(srcu_readers_active(sp))) {
393 		pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
394 		return; /* Caller forgot to stop doing call_srcu()? */
395 	}
396 	free_percpu(sp->sda);
397 	sp->sda = NULL;
398 }
399 EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
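
/*
 * Teardown sketch (my_srcu is hypothetical): before a dynamically
 * initialized srcu_struct is cleaned up, the caller must stop posting new
 * callbacks, wait for any still in flight, and only then release the
 * per-CPU state:
 *
 *	// No further call_srcu() or synchronize_srcu() on my_srcu past here.
 *	srcu_barrier(&my_srcu);
 *	cleanup_srcu_struct(&my_srcu);
 */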
400 
401 /*
402  * Counts the new reader in the appropriate per-CPU element of the
403  * srcu_struct.
404  * Returns an index that must be passed to the matching srcu_read_unlock().
405  */
406 int __srcu_read_lock(struct srcu_struct *sp)
407 {
408 	int idx;
409 
410 	idx = READ_ONCE(sp->srcu_idx) & 0x1;
411 	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
412 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
413 	return idx;
414 }
415 EXPORT_SYMBOL_GPL(__srcu_read_lock);
416 
417 /*
418  * Removes the count for the old reader from the appropriate per-CPU
419  * element of the srcu_struct.  Note that this may well be a different
420  * CPU than that which was incremented by the corresponding srcu_read_lock().
421  */
422 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
423 {
424 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
425 	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
426 }
427 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
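
/*
 * Reader-side usage sketch: callers normally go through the
 * srcu_read_lock() and srcu_read_unlock() wrappers in include/linux/srcu.h,
 * which add lockdep checking around the two functions above.  The names
 * my_srcu, my_data, my_data_ptr, and use() are hypothetical:
 *
 *	int idx;
 *	struct my_data *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data_ptr, &my_srcu);
 *	if (p)
 *		use(p);		// SRCU readers may block, unlike RCU readers
 *	srcu_read_unlock(&my_srcu, idx);
 */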
428 
429 /*
430  * We use an adaptive strategy for synchronize_srcu() and especially for
431  * synchronize_srcu_expedited().  We spin for a fixed time period
432  * (defined below) to allow SRCU readers to exit their read-side critical
433  * sections.  If there are still some readers after a few microseconds,
434  * we repeatedly block for 1-millisecond time periods.
435  */
436 #define SRCU_RETRY_CHECK_DELAY		5
437 
438 /*
439  * Start an SRCU grace period.
440  */
441 static void srcu_gp_start(struct srcu_struct *sp)
442 {
443 	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
444 	int state;
445 
446 	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
447 	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
448 	rcu_segcblist_advance(&sdp->srcu_cblist,
449 			      rcu_seq_current(&sp->srcu_gp_seq));
450 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
451 				       rcu_seq_snap(&sp->srcu_gp_seq));
452 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
453 	rcu_seq_start(&sp->srcu_gp_seq);
454 	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
455 	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
456 }
457 
458 /*
459  * Track online CPUs to guide callback workqueue placement.
460  */
461 DEFINE_PER_CPU(bool, srcu_online);
462 
463 void srcu_online_cpu(unsigned int cpu)
464 {
465 	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
466 }
467 
468 void srcu_offline_cpu(unsigned int cpu)
469 {
470 	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
471 }
472 
473 /*
474  * Place the workqueue handler on the specified CPU if online, otherwise
475  * just run it wherever.  This is useful for placing workqueue handlers
476  * that are to invoke the specified CPU's callbacks.
477  */
478 static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
479 				       struct delayed_work *dwork,
480 				       unsigned long delay)
481 {
482 	bool ret;
483 
484 	preempt_disable();
485 	if (READ_ONCE(per_cpu(srcu_online, cpu)))
486 		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
487 	else
488 		ret = queue_delayed_work(wq, dwork, delay);
489 	preempt_enable();
490 	return ret;
491 }
492 
493 /*
494  * Schedule callback invocation for the specified srcu_data structure,
495  * if possible, on the corresponding CPU.
496  */
497 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
498 {
499 	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
500 }
501 
502 /*
503  * Schedule callback invocation for all srcu_data structures associated
504  * with the specified srcu_node structure that have callbacks for the
505  * just-completed grace period, the one corresponding to idx.  If possible,
506  * schedule this invocation on the corresponding CPUs.
507  */
508 static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
509 				  unsigned long mask, unsigned long delay)
510 {
511 	int cpu;
512 
513 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
514 		if (!(mask & (1 << (cpu - snp->grplo))))
515 			continue;
516 		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
517 	}
518 }
519 
520 /*
521  * Note the end of an SRCU grace period.  Initiates callback invocation
522  * and starts a new grace period if needed.
523  *
524  * The ->srcu_cb_mutex acquisition does not protect any data, but
525  * instead prevents more than one grace period from starting while we
526  * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
527  * array to have a finite number of elements.
528  */
529 static void srcu_gp_end(struct srcu_struct *sp)
530 {
531 	unsigned long cbdelay;
532 	bool cbs;
533 	bool last_lvl;
534 	int cpu;
535 	unsigned long flags;
536 	unsigned long gpseq;
537 	int idx;
538 	unsigned long mask;
539 	struct srcu_data *sdp;
540 	struct srcu_node *snp;
541 
542 	/* Prevent more than one additional grace period. */
543 	mutex_lock(&sp->srcu_cb_mutex);
544 
545 	/* End the current grace period. */
546 	spin_lock_irq_rcu_node(sp);
547 	idx = rcu_seq_state(sp->srcu_gp_seq);
548 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
549 	cbdelay = srcu_get_delay(sp);
550 	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
551 	rcu_seq_end(&sp->srcu_gp_seq);
552 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
553 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
554 		sp->srcu_gp_seq_needed_exp = gpseq;
555 	spin_unlock_irq_rcu_node(sp);
556 	mutex_unlock(&sp->srcu_gp_mutex);
557 	/* A new grace period can start at this point.  But only one. */
558 
559 	/* Initiate callback invocation as needed. */
560 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
561 	rcu_for_each_node_breadth_first(sp, snp) {
562 		spin_lock_irq_rcu_node(snp);
563 		cbs = false;
564 		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
565 		if (last_lvl)
566 			cbs = snp->srcu_have_cbs[idx] == gpseq;
567 		snp->srcu_have_cbs[idx] = gpseq;
568 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
569 		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
570 			snp->srcu_gp_seq_needed_exp = gpseq;
571 		mask = snp->srcu_data_have_cbs[idx];
572 		snp->srcu_data_have_cbs[idx] = 0;
573 		spin_unlock_irq_rcu_node(snp);
574 		if (cbs)
575 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
576 
577 		/* Occasionally prevent srcu_data counter wrap. */
578 		if (!(gpseq & counter_wrap_check) && last_lvl)
579 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
580 				sdp = per_cpu_ptr(sp->sda, cpu);
581 				spin_lock_irqsave_rcu_node(sdp, flags);
582 				if (ULONG_CMP_GE(gpseq,
583 						 sdp->srcu_gp_seq_needed + 100))
584 					sdp->srcu_gp_seq_needed = gpseq;
585 				if (ULONG_CMP_GE(gpseq,
586 						 sdp->srcu_gp_seq_needed_exp + 100))
587 					sdp->srcu_gp_seq_needed_exp = gpseq;
588 				spin_unlock_irqrestore_rcu_node(sdp, flags);
589 			}
590 	}
591 
592 	/* Callback initiation done, allow grace periods after next. */
593 	mutex_unlock(&sp->srcu_cb_mutex);
594 
595 	/* Start a new grace period if needed. */
596 	spin_lock_irq_rcu_node(sp);
597 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
598 	if (!rcu_seq_state(gpseq) &&
599 	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
600 		srcu_gp_start(sp);
601 		spin_unlock_irq_rcu_node(sp);
602 		srcu_reschedule(sp, 0);
603 	} else {
604 		spin_unlock_irq_rcu_node(sp);
605 	}
606 }
607 
608 /*
609  * Funnel-locking scheme to scalably mediate many concurrent expedited
610  * grace-period requests.  This function is invoked for the first known
611  * expedited request for a grace period that has already been requested,
612  * but without expediting.  To start a completely new grace period,
613  * whether expedited or not, use srcu_funnel_gp_start() instead.
614  */
615 static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
616 				  unsigned long s)
617 {
618 	unsigned long flags;
619 
620 	for (; snp != NULL; snp = snp->srcu_parent) {
621 		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
622 		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
623 			return;
624 		spin_lock_irqsave_rcu_node(snp, flags);
625 		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
626 			spin_unlock_irqrestore_rcu_node(snp, flags);
627 			return;
628 		}
629 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
630 		spin_unlock_irqrestore_rcu_node(snp, flags);
631 	}
632 	spin_lock_irqsave_rcu_node(sp, flags);
633 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
634 		sp->srcu_gp_seq_needed_exp = s;
635 	spin_unlock_irqrestore_rcu_node(sp, flags);
636 }
637 
638 /*
639  * Funnel-locking scheme to scalably mediate many concurrent grace-period
640  * requests.  The winner has to do the work of actually starting grace
641  * period s.  Losers must either ensure that their desired grace-period
642  * number is recorded on at least their leaf srcu_node structure, or they
643  * must take steps to invoke their own callbacks.
644  */
645 static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
646 				 unsigned long s, bool do_norm)
647 {
648 	unsigned long flags;
649 	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
650 	struct srcu_node *snp = sdp->mynode;
651 	unsigned long snp_seq;
652 
653 	/* Each pass through the loop does one level of the srcu_node tree. */
654 	for (; snp != NULL; snp = snp->srcu_parent) {
655 		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
656 			return; /* GP already done and CBs recorded. */
657 		spin_lock_irqsave_rcu_node(snp, flags);
658 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
659 			snp_seq = snp->srcu_have_cbs[idx];
660 			if (snp == sdp->mynode && snp_seq == s)
661 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
662 			spin_unlock_irqrestore_rcu_node(snp, flags);
663 			if (snp == sdp->mynode && snp_seq != s) {
664 				srcu_schedule_cbs_sdp(sdp, do_norm
665 							   ? SRCU_INTERVAL
666 							   : 0);
667 				return;
668 			}
669 			if (!do_norm)
670 				srcu_funnel_exp_start(sp, snp, s);
671 			return;
672 		}
673 		snp->srcu_have_cbs[idx] = s;
674 		if (snp == sdp->mynode)
675 			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
676 		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
677 			snp->srcu_gp_seq_needed_exp = s;
678 		spin_unlock_irqrestore_rcu_node(snp, flags);
679 	}
680 
681 	/* Top of tree, must ensure the grace period will be started. */
682 	spin_lock_irqsave_rcu_node(sp, flags);
683 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
684 		/*
685 		 * Record need for grace period s.  Pair with load
686 		 * acquire setting up for initialization.
687 		 */
688 		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
689 	}
690 	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
691 		sp->srcu_gp_seq_needed_exp = s;
692 
693 	/* If grace period not already done and none in progress, start it. */
694 	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
695 	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
696 		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
697 		srcu_gp_start(sp);
698 		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
699 	}
700 	spin_unlock_irqrestore_rcu_node(sp, flags);
701 }
702 
703 /*
704  * Wait until all readers counted by array index idx complete, but
705  * loop an additional time if there is an expedited grace period pending.
706  * The caller must ensure that ->srcu_idx is not changed while checking.
707  */
708 static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
709 {
710 	for (;;) {
711 		if (srcu_readers_active_idx_check(sp, idx))
712 			return true;
713 		if (--trycount + !srcu_get_delay(sp) <= 0)
714 			return false;
715 		udelay(SRCU_RETRY_CHECK_DELAY);
716 	}
717 }
718 
719 /*
720  * Increment the ->srcu_idx counter so that future SRCU readers will
721  * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
722  * us to wait for pre-existing readers in a starvation-free manner.
723  */
724 static void srcu_flip(struct srcu_struct *sp)
725 {
726 	/*
727 	 * Ensure that if this updater saw a given reader's increment
728 	 * from __srcu_read_lock(), that reader was using an old value
729 	 * of ->srcu_idx.  Also ensure that if a given reader sees the
730 	 * new value of ->srcu_idx, this updater's earlier scans cannot
731 	 * have seen that reader's increments (which is OK, because this
732 	 * grace period need not wait on that reader).
733 	 */
734 	smp_mb(); /* E */  /* Pairs with B and C. */
735 
736 	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
737 
738 	/*
739 	 * Ensure that if the updater misses an __srcu_read_unlock()
740 	 * increment, that task's next __srcu_read_lock() will see the
741 	 * above counter update.  Note that both this memory barrier
742 	 * and the one in srcu_readers_active_idx_check() provide the
743 	 * guarantee for __srcu_read_lock().
744 	 */
745 	smp_mb(); /* D */  /* Pairs with C. */
746 }
747 
748 /*
749  * If SRCU is likely idle, return true, otherwise return false.
750  *
751  * Note that it is OK for several current from-idle requests for a new
752  * grace period to specify expediting because they will all end
753  * up requesting the same grace period anyhow.  So no loss.
754  *
755  * Note also that if any CPU (including the current one) is still invoking
756  * callbacks, this function will nevertheless say "idle".  This is not
757  * ideal, but the overhead of checking all CPUs' callback lists is even
758  * less ideal, especially on large systems.  Furthermore, the wakeup
759  * can happen before the callback is fully removed, so we have no choice
760  * but to accept this type of error.
761  *
762  * This function is also subject to counter-wrap errors, but let's face
763  * it, if this function was preempted for enough time for the counters
764  * to wrap, it really doesn't matter whether or not we expedite the grace
765  * period.  The extra overhead of a needlessly expedited grace period is
766  * negligible when amortized over that time period, and the extra latency
767  * of a needlessly non-expedited grace period is similarly negligible.
768  */
769 static bool srcu_might_be_idle(struct srcu_struct *sp)
770 {
771 	unsigned long curseq;
772 	unsigned long flags;
773 	struct srcu_data *sdp;
774 	unsigned long t;
775 
776 	/* If the local srcu_data structure has callbacks, not idle.  */
777 	local_irq_save(flags);
778 	sdp = this_cpu_ptr(sp->sda);
779 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
780 		local_irq_restore(flags);
781 		return false; /* Callbacks already present, so not idle. */
782 	}
783 	local_irq_restore(flags);
784 
785 	/*
786 	 * No local callbacks, so probabilistically probe global state.
787 	 * Exact information would require acquiring locks, which would
788 	 * kill scalability, hence the probabilistic nature of the probe.
789 	 */
790 
791 	/* First, see if enough time has passed since the last GP. */
792 	t = ktime_get_mono_fast_ns();
793 	if (exp_holdoff == 0 ||
794 	    time_in_range_open(t, sp->srcu_last_gp_end,
795 			       sp->srcu_last_gp_end + exp_holdoff))
796 		return false; /* Too soon after last GP. */
797 
798 	/* Next, check for probable idleness. */
799 	curseq = rcu_seq_current(&sp->srcu_gp_seq);
800 	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
801 	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
802 		return false; /* Grace period in progress, so not idle. */
803 	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
804 	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
805 		return false; /* GP # changed, so not idle. */
806 	return true; /* With reasonable probability, idle! */
807 }
808 
809 /*
810  * SRCU callback function to leak a callback.
811  */
812 static void srcu_leak_callback(struct rcu_head *rhp)
813 {
814 }
815 
816 /*
817  * Enqueue an SRCU callback on the srcu_data structure associated with
818  * the current CPU and the specified srcu_struct structure, initiating
819  * grace-period processing if it is not already running.
820  *
821  * Note that all CPUs must agree that the grace period extended beyond
822  * all pre-existing SRCU read-side critical sections.  On systems with
823  * more than one CPU, this means that when "func()" is invoked, each CPU
824  * is guaranteed to have executed a full memory barrier since the end of
825  * its last corresponding SRCU read-side critical section whose beginning
826  * preceded the call to call_srcu().  It also means that each CPU executing
827  * an SRCU read-side critical section that continues beyond the start of
828  * "func()" must have executed a memory barrier after the call_srcu()
829  * but before the beginning of that SRCU read-side critical section.
830  * Note that these guarantees include CPUs that are offline, idle, or
831  * executing in user mode, as well as CPUs that are executing in the kernel.
832  *
833  * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
834  * resulting SRCU callback function "func()", then both CPU A and CPU
835  * B are guaranteed to execute a full memory barrier during the time
836  * interval between the call to call_srcu() and the invocation of "func()".
837  * This guarantee applies even if CPU A and CPU B are the same CPU (but
838  * again only if the system has more than one CPU).
839  *
840  * Of course, these guarantees apply only for invocations of call_srcu(),
841  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
842  * srcu_struct structure.
843  */
844 void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
845 		 rcu_callback_t func, bool do_norm)
846 {
847 	unsigned long flags;
848 	bool needexp = false;
849 	bool needgp = false;
850 	unsigned long s;
851 	struct srcu_data *sdp;
852 
853 	check_init_srcu_struct(sp);
854 	if (debug_rcu_head_queue(rhp)) {
855 		/* Probable double call_srcu(), so leak the callback. */
856 		WRITE_ONCE(rhp->func, srcu_leak_callback);
857 		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
858 		return;
859 	}
860 	rhp->func = func;
861 	local_irq_save(flags);
862 	sdp = this_cpu_ptr(sp->sda);
863 	spin_lock_rcu_node(sdp);
864 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
865 	rcu_segcblist_advance(&sdp->srcu_cblist,
866 			      rcu_seq_current(&sp->srcu_gp_seq));
867 	s = rcu_seq_snap(&sp->srcu_gp_seq);
868 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
869 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
870 		sdp->srcu_gp_seq_needed = s;
871 		needgp = true;
872 	}
873 	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
874 		sdp->srcu_gp_seq_needed_exp = s;
875 		needexp = true;
876 	}
877 	spin_unlock_irqrestore_rcu_node(sdp, flags);
878 	if (needgp)
879 		srcu_funnel_gp_start(sp, sdp, s, do_norm);
880 	else if (needexp)
881 		srcu_funnel_exp_start(sp, sdp->mynode, s);
882 }
883 
884 /**
885  * call_srcu() - Queue a callback for invocation after an SRCU grace period
886  * @sp: srcu_struct in which to queue the callback
887  * @rhp: structure to be used for queueing the SRCU callback.
888  * @func: function to be invoked after the SRCU grace period
889  *
890  * The callback function will be invoked some time after a full SRCU
891  * grace period elapses, in other words after all pre-existing SRCU
892  * read-side critical sections have completed.  However, the callback
893  * function might well execute concurrently with other SRCU read-side
894  * critical sections that started after call_srcu() was invoked.  SRCU
895  * read-side critical sections are delimited by srcu_read_lock() and
896  * srcu_read_unlock(), and may be nested.
897  *
898  * The callback will be invoked from process context, but must nevertheless
899  * be fast and must not block.
900  */
901 void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
902 	       rcu_callback_t func)
903 {
904 	__call_srcu(sp, rhp, func, true);
905 }
906 EXPORT_SYMBOL_GPL(call_srcu);
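
/*
 * Asynchronous usage sketch: the rcu_head is embedded in the object being
 * freed, and the callback runs in process context but must not block.
 * The names my_obj, my_srcu, my_obj_free_cb(), and old are hypothetical:
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void my_obj_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	// After making "old" unreachable to new readers:
 *	call_srcu(&my_srcu, &old->rh, my_obj_free_cb);
 */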
907 
908 /*
909  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
910  */
911 static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
912 {
913 	struct rcu_synchronize rcu;
914 
915 	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
916 			 lock_is_held(&rcu_bh_lock_map) ||
917 			 lock_is_held(&rcu_lock_map) ||
918 			 lock_is_held(&rcu_sched_lock_map),
919 			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
920 
921 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
922 		return;
923 	might_sleep();
924 	check_init_srcu_struct(sp);
925 	init_completion(&rcu.completion);
926 	init_rcu_head_on_stack(&rcu.head);
927 	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
928 	wait_for_completion(&rcu.completion);
929 	destroy_rcu_head_on_stack(&rcu.head);
930 
931 	/*
932 	 * Make sure that later code is ordered after the SRCU grace
933 	 * period.  This pairs with the spin_lock_irq_rcu_node()
934 	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
935 	 * because the current CPU might have been totally uninvolved with
936 	 * (and thus unordered against) that grace period.
937 	 */
938 	smp_mb();
939 }
940 
941 /**
942  * synchronize_srcu_expedited - Brute-force SRCU grace period
943  * @sp: srcu_struct with which to synchronize.
944  *
945  * Wait for an SRCU grace period to elapse, but be more aggressive about
946  * spinning rather than blocking when waiting.
947  *
948  * Note that synchronize_srcu_expedited() has the same deadlock and
949  * memory-ordering properties as does synchronize_srcu().
950  */
951 void synchronize_srcu_expedited(struct srcu_struct *sp)
952 {
953 	__synchronize_srcu(sp, rcu_gp_is_normal());
954 }
955 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
956 
957 /**
958  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
959  * @sp: srcu_struct with which to synchronize.
960  *
961  * Wait for the counts of both index ranks to drain to zero. To avoid
962  * possible starvation of synchronize_srcu(), it first waits for the count
963  * of index=((->srcu_idx & 1) ^ 1) to drain to zero,
964  * and then flips ->srcu_idx and waits for the count of the other index.
965  *
966  * Can block; must be called from process context.
967  *
968  * Note that it is illegal to call synchronize_srcu() from the corresponding
969  * SRCU read-side critical section; doing so will result in deadlock.
970  * However, it is perfectly legal to call synchronize_srcu() on one
971  * srcu_struct from some other srcu_struct's read-side critical section,
972  * as long as the resulting graph of srcu_structs is acyclic.
973  *
974  * There are memory-ordering constraints implied by synchronize_srcu().
975  * On systems with more than one CPU, when synchronize_srcu() returns,
976  * each CPU is guaranteed to have executed a full memory barrier since
977  * the end of its last corresponding SRCU read-side critical section
978  * whose beginning preceded the call to synchronize_srcu().  In addition,
979  * each CPU having an SRCU read-side critical section that extends beyond
980  * the return from synchronize_srcu() is guaranteed to have executed a
981  * full memory barrier after the beginning of synchronize_srcu() and before
982  * the beginning of that SRCU read-side critical section.  Note that these
983  * guarantees include CPUs that are offline, idle, or executing in user mode,
984  * as well as CPUs that are executing in the kernel.
985  *
986  * Furthermore, if CPU A invoked synchronize_srcu(), which returned
987  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
988  * to have executed a full memory barrier during the execution of
989  * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
990  * are the same CPU, but again only if the system has more than one CPU.
991  *
992  * Of course, these memory-ordering guarantees apply only when
993  * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
994  * passed the same srcu_struct structure.
995  *
996  * If SRCU is likely idle, expedite the first request.  This semantic
997  * was provided by Classic SRCU, and is relied upon by its users, so TREE
998  * SRCU must also provide it.  Note that detecting idleness is heuristic
999  * and subject to both false positives and negatives.
1000  */
1001 void synchronize_srcu(struct srcu_struct *sp)
1002 {
1003 	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
1004 		synchronize_srcu_expedited(sp);
1005 	else
1006 		__synchronize_srcu(sp, true);
1007 }
1008 EXPORT_SYMBOL_GPL(synchronize_srcu);
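
/*
 * Synchronous update sketch, the classic remove/wait/reclaim pattern.  The
 * names my_data_ptr, my_update_lock, my_srcu, old, and new are hypothetical:
 *
 *	old = rcu_dereference_protected(my_data_ptr,
 *					lockdep_is_held(&my_update_lock));
 *	rcu_assign_pointer(my_data_ptr, new);
 *	synchronize_srcu(&my_srcu);	// wait for pre-existing readers
 *	kfree(old);			// no reader can still be using it
 */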
1009 
1010 /*
1011  * Callback function for srcu_barrier() use.
1012  */
1013 static void srcu_barrier_cb(struct rcu_head *rhp)
1014 {
1015 	struct srcu_data *sdp;
1016 	struct srcu_struct *sp;
1017 
1018 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1019 	sp = sdp->sp;
1020 	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
1021 		complete(&sp->srcu_barrier_completion);
1022 }
1023 
1024 /**
1025  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1026  * @sp: srcu_struct on which to wait for in-flight callbacks.
1027  */
1028 void srcu_barrier(struct srcu_struct *sp)
1029 {
1030 	int cpu;
1031 	struct srcu_data *sdp;
1032 	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
1033 
1034 	check_init_srcu_struct(sp);
1035 	mutex_lock(&sp->srcu_barrier_mutex);
1036 	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
1037 		smp_mb(); /* Force ordering following return. */
1038 		mutex_unlock(&sp->srcu_barrier_mutex);
1039 		return; /* Someone else did our work for us. */
1040 	}
1041 	rcu_seq_start(&sp->srcu_barrier_seq);
1042 	init_completion(&sp->srcu_barrier_completion);
1043 
1044 	/* Initial count prevents reaching zero until all CBs are posted. */
1045 	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
1046 
1047 	/*
1048 	 * Each pass through this loop enqueues a callback, but only
1049 	 * on CPUs already having callbacks enqueued.  Note that if
1050 	 * a CPU already has callbacks enqueued, it must have already
1051 	 * registered the need for a future grace period, so all we
1052 	 * need do is enqueue a callback that will use the same
1053 	 * grace period as the last callback already in the queue.
1054 	 */
1055 	for_each_possible_cpu(cpu) {
1056 		sdp = per_cpu_ptr(sp->sda, cpu);
1057 		spin_lock_irq_rcu_node(sdp);
1058 		atomic_inc(&sp->srcu_barrier_cpu_cnt);
1059 		sdp->srcu_barrier_head.func = srcu_barrier_cb;
1060 		debug_rcu_head_queue(&sdp->srcu_barrier_head);
1061 		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1062 					   &sdp->srcu_barrier_head, 0)) {
1063 			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1064 			atomic_dec(&sp->srcu_barrier_cpu_cnt);
1065 		}
1066 		spin_unlock_irq_rcu_node(sdp);
1067 	}
1068 
1069 	/* Remove the initial count, at which point reaching zero can happen. */
1070 	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
1071 		complete(&sp->srcu_barrier_completion);
1072 	wait_for_completion(&sp->srcu_barrier_completion);
1073 
1074 	rcu_seq_end(&sp->srcu_barrier_seq);
1075 	mutex_unlock(&sp->srcu_barrier_mutex);
1076 }
1077 EXPORT_SYMBOL_GPL(srcu_barrier);
1078 
1079 /**
1080  * srcu_batches_completed - return batches completed.
1081  * @sp: srcu_struct on which to report batch completion.
1082  *
1083  * Report the number of batches, correlated with, but not necessarily
1084  * precisely the same as, the number of grace periods that have elapsed.
1085  */
1086 unsigned long srcu_batches_completed(struct srcu_struct *sp)
1087 {
1088 	return sp->srcu_idx;
1089 }
1090 EXPORT_SYMBOL_GPL(srcu_batches_completed);
1091 
1092 /*
1093  * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1094  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1095  * completed in that state.
1096  */
1097 static void srcu_advance_state(struct srcu_struct *sp)
1098 {
1099 	int idx;
1100 
1101 	mutex_lock(&sp->srcu_gp_mutex);
1102 
1103 	/*
1104 	 * Because readers might be delayed for an extended period after
1105 	 * fetching ->srcu_idx for their index, at any point in time there
1106 	 * might well be readers using both idx=0 and idx=1.  We therefore
1107 	 * need to wait for readers to clear from both index values before
1108 	 * invoking a callback.
1109 	 *
1110 	 * The load-acquire ensures that we see the accesses performed
1111 	 * by the prior grace period.
1112 	 */
1113 	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
1114 	if (idx == SRCU_STATE_IDLE) {
1115 		spin_lock_irq_rcu_node(sp);
1116 		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
1117 			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
1118 			spin_unlock_irq_rcu_node(sp);
1119 			mutex_unlock(&sp->srcu_gp_mutex);
1120 			return;
1121 		}
1122 		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
1123 		if (idx == SRCU_STATE_IDLE)
1124 			srcu_gp_start(sp);
1125 		spin_unlock_irq_rcu_node(sp);
1126 		if (idx != SRCU_STATE_IDLE) {
1127 			mutex_unlock(&sp->srcu_gp_mutex);
1128 			return; /* Someone else started the grace period. */
1129 		}
1130 	}
1131 
1132 	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1133 		idx = 1 ^ (sp->srcu_idx & 1);
1134 		if (!try_check_zero(sp, idx, 1)) {
1135 			mutex_unlock(&sp->srcu_gp_mutex);
1136 			return; /* readers present, retry later. */
1137 		}
1138 		srcu_flip(sp);
1139 		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
1140 	}
1141 
1142 	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1143 
1144 		/*
1145 		 * SRCU read-side critical sections are normally short,
1146 		 * so check at least twice in quick succession after a flip.
1147 		 */
1148 		idx = 1 ^ (sp->srcu_idx & 1);
1149 		if (!try_check_zero(sp, idx, 2)) {
1150 			mutex_unlock(&sp->srcu_gp_mutex);
1151 			return; /* readers present, retry later. */
1152 		}
1153 		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
1154 	}
1155 }
1156 
1157 /*
1158  * Invoke a limited number of SRCU callbacks that have passed through
1159  * their grace period.  If there are more to do, SRCU will reschedule
1160  * the workqueue.  Note that needed memory barriers have been executed
1161  * in this task's context by srcu_readers_active_idx_check().
1162  */
1163 static void srcu_invoke_callbacks(struct work_struct *work)
1164 {
1165 	bool more;
1166 	struct rcu_cblist ready_cbs;
1167 	struct rcu_head *rhp;
1168 	struct srcu_data *sdp;
1169 	struct srcu_struct *sp;
1170 
1171 	sdp = container_of(work, struct srcu_data, work.work);
1172 	sp = sdp->sp;
1173 	rcu_cblist_init(&ready_cbs);
1174 	spin_lock_irq_rcu_node(sdp);
1175 	rcu_segcblist_advance(&sdp->srcu_cblist,
1176 			      rcu_seq_current(&sp->srcu_gp_seq));
1177 	if (sdp->srcu_cblist_invoking ||
1178 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1179 		spin_unlock_irq_rcu_node(sdp);
1180 		return;  /* Someone else on the job or nothing to do. */
1181 	}
1182 
1183 	/* We are on the job!  Extract and invoke ready callbacks. */
1184 	sdp->srcu_cblist_invoking = true;
1185 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1186 	spin_unlock_irq_rcu_node(sdp);
1187 	rhp = rcu_cblist_dequeue(&ready_cbs);
1188 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1189 		debug_rcu_head_unqueue(rhp);
1190 		local_bh_disable();
1191 		rhp->func(rhp);
1192 		local_bh_enable();
1193 	}
1194 
1195 	/*
1196 	 * Update counts, accelerate new callbacks, and if needed,
1197 	 * schedule another round of callback invocation.
1198 	 */
1199 	spin_lock_irq_rcu_node(sdp);
1200 	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
1201 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1202 				       rcu_seq_snap(&sp->srcu_gp_seq));
1203 	sdp->srcu_cblist_invoking = false;
1204 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1205 	spin_unlock_irq_rcu_node(sdp);
1206 	if (more)
1207 		srcu_schedule_cbs_sdp(sdp, 0);
1208 }
1209 
1210 /*
1211  * Finished one round of SRCU grace period.  Start another if there are
1212  * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1213  */
1214 static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
1215 {
1216 	bool pushgp = true;
1217 
1218 	spin_lock_irq_rcu_node(sp);
1219 	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
1220 		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
1221 			/* All requests fulfilled, time to go idle. */
1222 			pushgp = false;
1223 		}
1224 	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
1225 		/* Outstanding request and no GP.  Start one. */
1226 		srcu_gp_start(sp);
1227 	}
1228 	spin_unlock_irq_rcu_node(sp);
1229 
1230 	if (pushgp)
1231 		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
1232 }
1233 
1234 /*
1235  * This is the work-queue function that handles SRCU grace periods.
1236  */
1237 static void process_srcu(struct work_struct *work)
1238 {
1239 	struct srcu_struct *sp;
1240 
1241 	sp = container_of(work, struct srcu_struct, work.work);
1242 
1243 	srcu_advance_state(sp);
1244 	srcu_reschedule(sp, srcu_get_delay(sp));
1245 }
1246 
1247 void srcutorture_get_gp_data(enum rcutorture_type test_type,
1248 			     struct srcu_struct *sp, int *flags,
1249 			     unsigned long *gpnum, unsigned long *completed)
1250 {
1251 	if (test_type != SRCU_FLAVOR)
1252 		return;
1253 	*flags = 0;
1254 	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
1255 	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
1256 }
1257 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1258 
1259 void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
1260 {
1261 	int cpu;
1262 	int idx;
1263 	unsigned long s0 = 0, s1 = 0;
1264 
1265 	idx = sp->srcu_idx & 0x1;
1266 	pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
1267 	for_each_possible_cpu(cpu) {
1268 		unsigned long l0, l1;
1269 		unsigned long u0, u1;
1270 		long c0, c1;
1271 		struct srcu_data *counts;
1272 
1273 		counts = per_cpu_ptr(sp->sda, cpu);
1274 		u0 = counts->srcu_unlock_count[!idx];
1275 		u1 = counts->srcu_unlock_count[idx];
1276 
1277 		/*
1278 		 * Make sure that a lock is always counted if the corresponding
1279 		 * unlock is counted.
1280 		 */
1281 		smp_rmb();
1282 
1283 		l0 = counts->srcu_lock_count[!idx];
1284 		l1 = counts->srcu_lock_count[idx];
1285 
1286 		c0 = l0 - u0;
1287 		c1 = l1 - u1;
1288 		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
1289 		s0 += c0;
1290 		s1 += c1;
1291 	}
1292 	pr_cont(" T(%ld,%ld)\n", s0, s1);
1293 }
1294 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1295 
1296 static int __init srcu_bootup_announce(void)
1297 {
1298 	pr_info("Hierarchical SRCU implementation.\n");
1299 	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1300 		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1301 	return 0;
1302 }
1303 early_initcall(srcu_bootup_announce);
1304