/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
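
/*
 * Illustrative snapshot/completion idiom for the two helpers above
 * (a hypothetical caller sketch, not code from this file):
 *
 *	unsigned long s = rcu_exp_gp_seq_snap();
 *
 *	...  // Anything already done here is covered by the next exp GP.
 *
 *	if (rcu_exp_gp_seq_done(s))
 *		return;  // A full expedited GP elapsed since the snapshot.
 */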

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one_online(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work,
	 * and otherwise falling through to acquire ->exp_mutex.  The
	 * mapping from CPU to rcu_node structure can be inexact, as it
	 * is just promoting locality and is not strictly needed for
	 * correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
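
/*
 * For reference, the expected calling sequence for exp_funnel_lock()
 * (an illustrative sketch of what synchronize_rcu_expedited() below
 * actually does, not an additional code path):
 *
 *	s = rcu_exp_gp_seq_snap();
 *	if (exp_funnel_lock(s))
 *		return;  // Piggybacked on another GP; no mutex held.
 *	...          // Drive the expedited grace period to completion.
 *	mutex_unlock(&rcu_state.exp_mutex);  // Caller releases the mutex.
 */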

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(cpu);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

static void rcu_exp_sel_wait_wake(unsigned long s);

#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_exp_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_gp_kworker);
}

static inline bool rcu_exp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_exp_worker_started(void)
{
	return !!READ_ONCE(rcu_gp_wq);
}

static inline bool rcu_exp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/* If all offline, queue the work on an unbound CPU. */
	if (unlikely(cpu > rnp->grphi - rnp->grplo))
		cpu = WORK_CPU_UNBOUND;
	else
		cpu += rnp->grplo;
	queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
	queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
	destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!rcu_exp_par_worker_started() ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0); /* Workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	unsigned long flags;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_exp_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			mask = READ_ONCE(rnp->expmask);
			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
			}
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
					"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
				dump_cpu_task(cpu);
				preempt_enable();
			}
			rcu_exp_print_detail_task_stall_rnp(rnp);
		}
		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
		panic_on_rcu_stall();
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If preemption is also enabled or this is
	 * an interrupt from idle, immediately report the quiescent
	 * state; otherwise, defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle()) {
			rcu_report_exp_rdp(rdp);
		} else {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->cpu_no_qs.b.exp
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rnp->exp_tasks) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_exp_stall_task_details)
		return;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool use_worker;
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
		      rcu_exp_worker_started();

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu_hurry);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(!use_worker)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshal arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Work actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(use_worker))
		synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
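
/*
 * Illustrative update-side usage of synchronize_rcu_expedited(),
 * analogous to synchronize_rcu().  The "gp" pointer, "newp", and
 * "mylock" below are hypothetical, shown only to sketch the pattern:
 *
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&mylock));
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_rcu_expedited();  // Wait for pre-existing readers.
 *	kfree(p);
 */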

/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		if (rcu_init_invoked()) {
			rnp->exp_seq_poll_rq = s;
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
		}
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
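
/*
 * Illustrative polling pattern for the function above (a hypothetical
 * caller sketch):
 *
 *	unsigned long cookie = start_poll_synchronize_rcu_expedited();
 *
 *	...  // Do other work while the expedited GP proceeds.
 *
 *	if (poll_state_synchronize_rcu(cookie))
 *		...  // A full grace period has elapsed since the snapshot.
 */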

/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
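
/*
 * Illustrative pairing of the function above with a prior cookie
 * (a hypothetical caller sketch):
 *
 *	unsigned long oldstate = get_state_synchronize_rcu();
 *
 *	...  // If a grace period elapses here, the wait below is free.
 *
 *	cond_synchronize_rcu_expedited(oldstate);  // Waits only if needed.
 */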

/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);