Matched-line excerpts: RCU no-CBs (offloaded-callback) implementation

1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 * Internal non-public definitions that provide either classic or preemptible semantics.
21 return lockdep_is_held(&rdp->nocb_lock); in rcu_lockdep_is_held_nocb()
27 if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread) in rcu_current_is_nocb_kthread()
30 if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread) in rcu_current_is_nocb_kthread()
37 * Offload callback processing from the boot-time-specified set of CPUs
40 * a grace period to elapse, and invoke the callbacks. These kthreads
43 * invoke callbacks. Each GP kthread invokes its own CBs. The no-CBs CPUs
50 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
51 * running CPU-bound user-mode computations.
53 * Offloading of callbacks can also be used as an energy-efficiency
55 * about entering dyntick-idle mode.
60 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
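
The kernel parses this list with cpulist_parse(); the following self-contained analogue (parse_cpulist() is an illustrative name, not the kernel API) shows the "N-M,K" range syntax being turned into a bitmask:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a CPU list such as "1-7,9" into a 64-bit mask (CPUs 0..63). */
    static int parse_cpulist(const char *s, uint64_t *mask)
    {
        *mask = 0;
        while (*s) {
            char *end;
            long lo = strtol(s, &end, 10), hi = lo;

            if (end == s || lo < 0 || lo > 63)
                return -1;
            if (*end == '-') {          /* a "lo-hi" range */
                s = end + 1;
                hi = strtol(s, &end, 10);
                if (end == s || hi < lo || hi > 63)
                    return -1;
            }
            for (long cpu = lo; cpu <= hi; cpu++)
                *mask |= 1ULL << cpu;
            if (*end == ',')
                end++;
            s = end;
        }
        return 0;
    }

    int main(void)
    {
        uint64_t mask;

        if (!parse_cpulist("1-7,9", &mask))
            printf("mask = %#llx\n", (unsigned long long)mask);  /* 0x2fe */
        return 0;
    }
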
85 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
87 * on ->nocb_lock, which only can happen at high call_rcu() rates.
93 * Acquire the specified rcu_data structure's ->nocb_bypass_lock. If the
97 __acquires(&rdp->nocb_bypass_lock) in rcu_nocb_bypass_lock()
100 if (raw_spin_trylock(&rdp->nocb_bypass_lock)) in rcu_nocb_bypass_lock()
106 WARN_ON_ONCE(smp_processor_id() != rdp->cpu); in rcu_nocb_bypass_lock()
107 raw_spin_lock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_lock()
112 * Conditionally acquire the specified rcu_data structure's ->nocb_bypass_lock.
117 return raw_spin_trylock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_trylock()
121 * Release the specified rcu_data structure's ->nocb_bypass_lock.
124 __releases(&rdp->nocb_bypass_lock) in rcu_nocb_bypass_unlock()
127 raw_spin_unlock(&rdp->nocb_bypass_lock); in rcu_nocb_bypass_unlock()
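
rcu_nocb_bypass_lock() attempts a trylock fast path and only blocks (after sanity-checking that it is running on the owning CPU) when contended. A minimal runnable pthread analogue of that trylock-first pattern, with all names illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t bypass_lock = PTHREAD_MUTEX_INITIALIZER;
    static long contended_events;

    /* Fast path first; contention is expected to be rare, so note it. */
    static void bypass_lock_acquire(void)
    {
        if (pthread_mutex_trylock(&bypass_lock) == 0)
            return;             /* uncontended fast path */
        contended_events++;     /* slow path: someone else holds it */
        pthread_mutex_lock(&bypass_lock);
    }

    int main(void)
    {
        bypass_lock_acquire();
        pthread_mutex_unlock(&bypass_lock);
        printf("contended %ld time(s)\n", contended_events);
        return 0;
    }
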
131 * Acquire the specified rcu_data structure's ->nocb_lock, but only
132 * if it corresponds to a no-CBs CPU.
139 raw_spin_lock(&rdp->nocb_lock); in rcu_nocb_lock()
143 * Release the specified rcu_data structure's ->nocb_lock, but only
144 * if it corresponds to a no-CBs CPU.
150 raw_spin_unlock(&rdp->nocb_lock); in rcu_nocb_unlock()
155 * Release the specified rcu_data structure's ->nocb_lock and restore
156 * interrupts, but only if it corresponds to a no-CBs CPU.
163 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); in rcu_nocb_unlock_irqrestore()
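
rcu_nocb_lock()/rcu_nocb_unlock() take ->nocb_lock only when the CPU's callback list is actually offloaded and hence shared with kthreads; otherwise the list is strictly CPU-local and needs no lock. A small analogue of such conditional locking (struct and names illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct percpu_data {
        bool offloaded;          /* callback list shared with kthreads? */
        pthread_mutex_t lock;
        long n_cbs;
    };

    /* Take the lock only when another thread can touch the list. */
    static void cond_lock(struct percpu_data *d)
    {
        if (d->offloaded)
            pthread_mutex_lock(&d->lock);
    }

    static void cond_unlock(struct percpu_data *d)
    {
        if (d->offloaded)
            pthread_mutex_unlock(&d->lock);
    }

    int main(void)
    {
        struct percpu_data d = { true, PTHREAD_MUTEX_INITIALIZER, 0 };

        cond_lock(&d);
        d.n_cbs++;               /* safe against the offload kthreads */
        cond_unlock(&d);
        printf("%ld callback(s) queued\n", d.n_cbs);
        return 0;
    }
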
169 /* Lockdep check that ->cblist may be safely accessed. */
174 lockdep_assert_held(&rdp->nocb_lock); in rcu_lockdep_assert_cblist_protected()
178 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
179 * grace period.
188 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; in rcu_nocb_gp_get()
193 init_swait_queue_head(&rnp->nocb_gp_wq[0]); in rcu_init_one_nocb()
194 init_swait_queue_head(&rnp->nocb_gp_wq[1]); in rcu_init_one_nocb()
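
rcu_nocb_gp_get() picks one of the two wait queues by the low bit of the grace-period sequence counter; rcu_seq_ctr() strips the two low state bits, so waiters on consecutive grace periods alternate queues and never collide. A standalone demo of the indexing (RCU_SEQ_CTR_SHIFT of 2 mirrors the kernel's definition):

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT 2     /* low bits hold grace-period state */

    static unsigned long rcu_seq_ctr(unsigned long s)
    {
        return s >> RCU_SEQ_CTR_SHIFT;
    }

    int main(void)
    {
        /* Consecutive grace periods alternate between the two queues. */
        for (unsigned long gp = 0; gp < 4; gp++) {
            unsigned long seq = gp << RCU_SEQ_CTR_SHIFT;
            printf("gp_seq %#lx -> nocb_gp_wq[%lu]\n",
                   seq, rcu_seq_ctr(seq) & 0x1);
        }
        return 0;
    }
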
200 __releases(rdp_gp->nocb_gp_lock) in __wake_nocb_gp()
204 if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) { in __wake_nocb_gp()
205 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in __wake_nocb_gp()
206 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __wake_nocb_gp()
211 if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { in __wake_nocb_gp()
212 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); in __wake_nocb_gp()
213 del_timer(&rdp_gp->nocb_timer); in __wake_nocb_gp()
216 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { in __wake_nocb_gp()
217 WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); in __wake_nocb_gp()
220 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in __wake_nocb_gp()
222 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); in __wake_nocb_gp()
224 swake_up_one_online(&rdp_gp->nocb_gp_wq); in __wake_nocb_gp()
226 wake_up_process(rdp_gp->nocb_gp_kthread); in __wake_nocb_gp()
238 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in wake_nocb_gp()
240 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp()
277 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in wake_nocb_gp_defer()
279 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp_defer()
286 rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) { in wake_nocb_gp_defer()
287 mod_timer(&rdp_gp->nocb_timer, jiffies + jiffies_till_flush); in wake_nocb_gp_defer()
288 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); in wake_nocb_gp_defer()
290 mod_timer(&rdp_gp->nocb_timer, jiffies + 2); in wake_nocb_gp_defer()
291 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); in wake_nocb_gp_defer()
293 if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE) in wake_nocb_gp_defer()
294 mod_timer(&rdp_gp->nocb_timer, jiffies + 1); in wake_nocb_gp_defer()
295 if (rdp_gp->nocb_defer_wakeup < waketype) in wake_nocb_gp_defer()
296 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); in wake_nocb_gp_defer()
299 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in wake_nocb_gp_defer()
301 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); in wake_nocb_gp_defer()
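
wake_nocb_gp_defer() arms the deferred-wakeup timer with a deadline that depends on the request (lazy waits jiffies_till_flush, bypass two jiffies, anything stronger one jiffy) and never downgrades a pending wake level. A simplified single-timer model of that escalate-only policy, with all names illustrative:

    #include <stdio.h>

    enum wake_level { WAKE_NOT, WAKE_BYPASS, WAKE, WAKE_FORCE };

    struct deferred_wake {
        enum wake_level level;
        unsigned long deadline;   /* absolute "jiffies" of pending timer */
    };

    /* Escalate only: a stronger request may pull the timer in and raise
     * the level, but a weaker one never extends or downgrades it. */
    static void defer_wake(struct deferred_wake *dw, enum wake_level want,
                           unsigned long now, unsigned long delay)
    {
        if (dw->level == WAKE_NOT || now + delay < dw->deadline)
            dw->deadline = now + delay;
        if (want > dw->level)
            dw->level = want;
    }

    int main(void)
    {
        struct deferred_wake dw = { WAKE_NOT, 0 };

        defer_wake(&dw, WAKE_BYPASS, 100, 2);  /* bypass: longer delay */
        defer_wake(&dw, WAKE, 100, 1);         /* urgent: pull timer in */
        printf("level %d, deadline %lu\n", dw.level, dw.deadline);
        return 0;
    }
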
305 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
306 * However, if there is a callback to be enqueued and if ->nocb_bypass
307 * proves to be initially empty, just return false because the no-CB GP kthread may need to be awakened in this case.
323 lockdep_assert_held(&rdp->nocb_bypass_lock); in rcu_nocb_do_flush_bypass()
324 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { in rcu_nocb_do_flush_bypass()
325 raw_spin_unlock(&rdp->nocb_bypass_lock); in rcu_nocb_do_flush_bypass()
328 /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */ in rcu_nocb_do_flush_bypass()
330 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ in rcu_nocb_do_flush_bypass()
334 * ->cblist so that we can take advantage of the grace-period that will in rcu_nocb_do_flush_bypass()
339 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
342 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
343 WRITE_ONCE(rdp->lazy_len, 0); in rcu_nocb_do_flush_bypass()
345 rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl); in rcu_nocb_do_flush_bypass()
346 WRITE_ONCE(rdp->nocb_bypass_first, j); in rcu_nocb_do_flush_bypass()
352 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
353 * However, if there is a callback to be enqueued and if ->nocb_bypass
354 * proves to be initially empty, just return false because the no-CB GP kthread may need to be awakened in this case.
370 * If the ->nocb_bypass_lock is immediately available, flush the
371 * ->nocb_bypass queue into ->cblist.
383 * See whether it is appropriate to use the ->nocb_bypass list in order
384 * to control contention on ->nocb_lock. A limited number of direct
385 * enqueues are permitted into ->cblist per jiffy. If ->nocb_bypass
386 * is non-empty, further callbacks must be placed into ->nocb_bypass,
388 * back to direct use of ->cblist. However, ->nocb_bypass should not be
389 * used if ->cblist is empty, because otherwise callbacks can be stranded
390 * on ->nocb_bypass because we cannot count on the current CPU ever again
391 * invoking call_rcu(). The general rule is that if ->nocb_bypass is
392 * non-empty, the corresponding no-CBs grace-period kthread must not be
393 * in an indefinite sleep state.
395 * Finally, it is not permitted to use the bypass during early boot,
396 * as doing so would confuse the auto-initialization code. Besides
397 * which, there is no point in worrying about lock contention while
398 * there is only one CPU in operation.
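
The rate limiting described here hinges on nocb_nobypass_count: within one jiffy each direct enqueue bumps the count, a new jiffy decays it, and the bypass is used once the count reaches the per-jiffy limit. A simplified runnable model (the kernel also clamps the decay differently and special-cases lazy callbacks):

    #include <stdio.h>

    #define LIM_PER_JIFFY 16    /* direct enqueues allowed per tick */

    static unsigned long last_jiffy, nobypass_count;

    /* Return 1 to enqueue directly, 0 to divert into the bypass list. */
    static int may_enqueue_direct(unsigned long j)
    {
        if (j == last_jiffy) {
            nobypass_count++;
        } else {                /* new tick: decay the running count */
            last_jiffy = j;
            nobypass_count = nobypass_count < LIM_PER_JIFFY ?
                             0 : LIM_PER_JIFFY;
        }
        return nobypass_count < LIM_PER_JIFFY;
    }

    int main(void)
    {
        int direct = 0;

        for (int i = 0; i < 100; i++)   /* 100 call_rcu()s in one tick */
            direct += may_enqueue_direct(1);
        printf("%d direct, %d bypassed\n", direct, 100 - direct);
        return 0;
    }
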
407 long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in rcu_nocb_try_bypass()
408 bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len)); in rcu_nocb_try_bypass()
415 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
419 // In the process of (de-)offloading: no bypassing, but locking. in rcu_nocb_try_bypass()
421 if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) { in rcu_nocb_try_bypass()
423 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
427 // Don't use ->nocb_bypass during early boot. in rcu_nocb_try_bypass()
430 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
431 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
436 // moving back from ->nocb_bypass to ->cblist. in rcu_nocb_try_bypass()
437 if (j == rdp->nocb_nobypass_last) { in rcu_nocb_try_bypass()
438 c = rdp->nocb_nobypass_count + 1; in rcu_nocb_try_bypass()
440 WRITE_ONCE(rdp->nocb_nobypass_last, j); in rcu_nocb_try_bypass()
441 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; in rcu_nocb_try_bypass()
442 if (ULONG_CMP_LT(rdp->nocb_nobypass_count, in rcu_nocb_try_bypass()
448 WRITE_ONCE(rdp->nocb_nobypass_count, c); in rcu_nocb_try_bypass()
450 // If there hasn't yet been all that many ->cblist enqueues in rcu_nocb_try_bypass()
451 // this jiffy, tell the caller to enqueue onto ->cblist. But flush in rcu_nocb_try_bypass()
452 // ->nocb_bypass first. in rcu_nocb_try_bypass()
453 // Lazy CBs throttle this back and do immediate bypass queuing. in rcu_nocb_try_bypass()
454 if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) { in rcu_nocb_try_bypass()
456 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
458 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
462 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
466 // If ->nocb_bypass has been used too long or is too full, in rcu_nocb_try_bypass()
467 // flush ->nocb_bypass to ->cblist. in rcu_nocb_try_bypass()
468 if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) || in rcu_nocb_try_bypass()
470 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush))) || in rcu_nocb_try_bypass()
473 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_nocb_try_bypass()
477 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
479 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_try_bypass()
482 if (j != rdp->nocb_gp_adv_time && in rcu_nocb_try_bypass()
483 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in rcu_nocb_try_bypass()
484 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { in rcu_nocb_try_bypass()
485 rcu_advance_cbs_nowake(rdp->mynode, rdp); in rcu_nocb_try_bypass()
486 rdp->nocb_gp_adv_time = j; in rcu_nocb_try_bypass()
499 ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in rcu_nocb_try_bypass()
500 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ in rcu_nocb_try_bypass()
501 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_try_bypass()
504 WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1); in rcu_nocb_try_bypass()
507 WRITE_ONCE(rdp->nocb_bypass_first, j); in rcu_nocb_try_bypass()
508 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ")); in rcu_nocb_try_bypass()
512 // A wake up of the grace period kthread or timer adjustment needs to be done only if: in rcu_nocb_try_bypass()
518 // b. The new CB is non-lazy. in rcu_nocb_try_bypass()
520 // No-CBs GP kthread might be indefinitely asleep, if so, wake. in rcu_nocb_try_bypass()
522 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) { in rcu_nocb_try_bypass()
523 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
527 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in rcu_nocb_try_bypass()
536 * Awaken the no-CBs grace-period kthread if needed, either due to it legitimately being asleep or due to overload conditions.
543 __releases(rdp->nocb_lock) in __call_rcu_nocb_wake()
553 t = READ_ONCE(rdp->nocb_gp_kthread); in __call_rcu_nocb_wake()
556 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __call_rcu_nocb_wake()
561 len = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_nocb_wake()
562 bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass); in __call_rcu_nocb_wake()
563 lazy_len = READ_ONCE(rdp->lazy_len); in __call_rcu_nocb_wake()
565 rdp->qlen_last_fqs_check = len; in __call_rcu_nocb_wake()
571 } else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) { in __call_rcu_nocb_wake()
575 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in __call_rcu_nocb_wake()
579 * Don't do the wake-up upfront on fragile paths. in __call_rcu_nocb_wake()
581 * (soft-)IRQs. Rely on the final deferred wake-up from in __call_rcu_nocb_wake()
588 } else if (len > rdp->qlen_last_fqs_check + qhimark) { in __call_rcu_nocb_wake()
590 rdp->qlen_last_fqs_check = len; in __call_rcu_nocb_wake()
592 if (j != rdp->nocb_gp_adv_time && in __call_rcu_nocb_wake()
593 rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in __call_rcu_nocb_wake()
594 rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { in __call_rcu_nocb_wake()
595 rcu_advance_cbs_nowake(rdp->mynode, rdp); in __call_rcu_nocb_wake()
596 rdp->nocb_gp_adv_time = j; in __call_rcu_nocb_wake()
599 if ((rdp->nocb_cb_sleep || in __call_rcu_nocb_wake()
600 !rcu_segcblist_ready_cbs(&rdp->cblist)) && in __call_rcu_nocb_wake()
601 !timer_pending(&rdp->nocb_timer)) { in __call_rcu_nocb_wake()
607 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); in __call_rcu_nocb_wake()
611 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); in __call_rcu_nocb_wake()
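
__call_rcu_nocb_wake() wakes (or defers a wake of) the GP kthread when the list was previously empty, and forces a wake when the queue grows past qlen_last_fqs_check + qhimark. A simplified runnable model of that hysteresis, names illustrative:

    #include <stdio.h>

    #define QHIMARK 10000   /* overload threshold, like the qhimark knob */

    static long qlen, qlen_last_check;

    /* Returns nonzero when this enqueue should wake the GP kthread. */
    static int enqueue_needs_wake(void)
    {
        qlen++;
        if (qlen == 1) {                /* first CB: kthread may be idle */
            qlen_last_check = qlen;
            return 1;
        }
        if (qlen > qlen_last_check + QHIMARK) { /* queue growing too fast */
            qlen_last_check = qlen;
            return 1;
        }
        return 0;                       /* kthread already has work */
    }

    int main(void)
    {
        long wakes = 0;

        for (long i = 0; i < 25000; i++)
            wakes += enqueue_needs_wake();
        printf("%ld wakeups for 25000 enqueues\n", wakes);  /* 3 */
        return 0;
    }
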
618 struct rcu_segcblist *cblist = &rdp->cblist; in nocb_gp_toggle_rdp()
627 * We will handle this rdp until it ever gets de-offloaded. in nocb_gp_toggle_rdp()
636 * De-offloading. Clear our flag and notify the de-offload worker. in nocb_gp_toggle_rdp()
637 * We will ignore this rdp until it ever gets re-offloaded. in nocb_gp_toggle_rdp()
645 ret = -1; in nocb_gp_toggle_rdp()
656 swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq, in nocb_gp_sleep()
657 !READ_ONCE(my_rdp->nocb_gp_sleep)); in nocb_gp_sleep()
662 * No-CBs GP kthreads come here to wait for additional callbacks to show up or for grace periods to end.
668 int __maybe_unused cpu = my_rdp->cpu; in nocb_gp_wait()
684 * nearest grace period (if any) to wait for next. The CB kthreads in nocb_gp_wait()
685 * and the global grace-period kthread are awakened if needed. in nocb_gp_wait()
687 WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); in nocb_gp_wait()
690 * CPU is de-offloaded and added to the list before that CPU is in nocb_gp_wait()
691 * (re-)offloaded. If the following loop happens to be referencing in nocb_gp_wait()
693 * CPU is de-offloaded and then immediately re-offloaded, this in nocb_gp_wait()
699 * is added to the list, so the skipped-over rcu_data structures won't be ignored for long. in nocb_gp_wait()
702 list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) { in nocb_gp_wait()
707 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); in nocb_gp_wait()
709 lockdep_assert_held(&rdp->nocb_lock); in nocb_gp_wait()
710 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in nocb_gp_wait()
711 lazy_ncbs = READ_ONCE(rdp->lazy_len); in nocb_gp_wait()
714 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush) || in nocb_gp_wait()
718 (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || in nocb_gp_wait()
721 } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) { in nocb_gp_wait()
729 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); in nocb_gp_wait()
730 lazy_ncbs = READ_ONCE(rdp->lazy_len); in nocb_gp_wait()
734 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in nocb_gp_wait()
741 rnp = rdp->mynode; in nocb_gp_wait()
745 if (!rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
747 (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && in nocb_gp_wait()
748 rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) { in nocb_gp_wait()
751 wasempty = rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
755 // Need to wait on some grace period? in nocb_gp_wait()
757 !rcu_segcblist_restempty(&rdp->cblist, in nocb_gp_wait()
759 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) { in nocb_gp_wait()
764 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, in nocb_gp_wait()
767 if (rcu_segcblist_ready_cbs(&rdp->cblist)) { in nocb_gp_wait()
768 needwake = rdp->nocb_cb_sleep; in nocb_gp_wait()
769 WRITE_ONCE(rdp->nocb_cb_sleep, false); in nocb_gp_wait()
770 smp_mb(); /* CB invocation -after- GP end. */ in nocb_gp_wait()
776 swake_up_one(&rdp->nocb_cb_wq); in nocb_gp_wait()
783 my_rdp->nocb_gp_bypass = bypass; in nocb_gp_wait()
784 my_rdp->nocb_gp_gp = needwait_gp; in nocb_gp_wait()
785 my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0; in nocb_gp_wait()
787 // At least one child with non-empty ->nocb_bypass, so set timer in order to avoid stranding its callbacks. in nocb_gp_wait()
805 if (list_empty(&my_rdp->nocb_head_rdp)) { in nocb_gp_wait()
806 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
807 if (!my_rdp->nocb_toggling_rdp) in nocb_gp_wait()
808 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); in nocb_gp_wait()
809 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
819 rnp = my_rdp->mynode; in nocb_gp_wait()
822 rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1], in nocb_gp_wait()
823 rcu_seq_done(&rnp->gp_seq, wait_gp_seq) || in nocb_gp_wait()
824 !READ_ONCE(my_rdp->nocb_gp_sleep)); in nocb_gp_wait()
829 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
830 // (De-)queue an rdp to/from the group if its nocb state is changing in nocb_gp_wait()
831 rdp_toggling = my_rdp->nocb_toggling_rdp; in nocb_gp_wait()
833 my_rdp->nocb_toggling_rdp = NULL; in nocb_gp_wait()
835 if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { in nocb_gp_wait()
836 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); in nocb_gp_wait()
837 del_timer(&my_rdp->nocb_timer); in nocb_gp_wait()
839 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); in nocb_gp_wait()
840 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
842 rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp); in nocb_gp_wait()
851 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
852 my_rdp->nocb_toggling_rdp = NULL; in nocb_gp_wait()
853 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); in nocb_gp_wait()
863 list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp); in nocb_gp_wait()
865 list_del(&rdp_toggling->nocb_entry_rdp); in nocb_gp_wait()
867 swake_up_one(&rdp_toggling->nocb_state_wq); in nocb_gp_wait()
870 my_rdp->nocb_gp_seq = -1; in nocb_gp_wait()
875 * No-CBs grace-period-wait kthread. There is one of these per group of CPUs. Each such kthread checks for newly posted callbacks, waits for a
879 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
880 * that then have callback-invocation work to do.
887 WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1); in rcu_nocb_gp_kthread()
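
One rcuog kthread thus waits for grace periods on behalf of a whole group of CPUs and then wakes each CPU's callback kthread. A user-space pthread analogue of that one-leader/many-members fan-out (all names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NR_MEMBERS 4    /* CB kthreads served by one GP kthread */

    struct member {
        pthread_mutex_t lock;
        pthread_cond_t cv;
        int ready;          /* grace period done, callbacks runnable */
    };

    static struct member members[NR_MEMBERS];

    static void *cb_thread(void *arg)
    {
        struct member *m = arg;

        pthread_mutex_lock(&m->lock);
        while (!m->ready)   /* like nocb_cb_wait() */
            pthread_cond_wait(&m->cv, &m->lock);
        pthread_mutex_unlock(&m->lock);
        printf("member %d invoking callbacks\n", (int)(m - members));
        return NULL;
    }

    int main(void)
    {
        pthread_t tids[NR_MEMBERS];

        for (int i = 0; i < NR_MEMBERS; i++) {
            pthread_mutex_init(&members[i].lock, NULL);
            pthread_cond_init(&members[i].cv, NULL);
            pthread_create(&tids[i], NULL, cb_thread, &members[i]);
        }
        usleep(1000);       /* leader "waits for a grace period" */
        for (int i = 0; i < NR_MEMBERS; i++) {  /* then fans out wakeups */
            pthread_mutex_lock(&members[i].lock);
            members[i].ready = 1;
            pthread_cond_signal(&members[i].cv);
            pthread_mutex_unlock(&members[i].lock);
        }
        for (int i = 0; i < NR_MEMBERS; i++)
            pthread_join(tids[i], NULL);
        return 0;
    }
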
898 return rcu_segcblist_test_flags(&rdp->cblist, flags); in nocb_cb_can_run()
903 return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep); in nocb_cb_wait_cond()
907 * Invoke any ready callbacks from the corresponding no-CBs CPU, then, if there are no more, wait for more to appear.
912 struct rcu_segcblist *cblist = &rdp->cblist; in nocb_cb_wait()
918 struct rcu_node *rnp = rdp->mynode; in nocb_cb_wait()
921 swait_event_interruptible_exclusive(rdp->nocb_cb_wq, in nocb_cb_wait()
925 if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^ in nocb_cb_wait()
927 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); in nocb_cb_wait()
937 * transitioning to/from NOCB mode, a self-requeuing callback might in nocb_cb_wait()
938 * be invoked from softirq. A short grace period could cause both in nocb_cb_wait()
947 rcu_seq_done(&rnp->gp_seq, cur_gp_seq) && in nocb_cb_wait()
949 needwake_gp = rcu_advance_cbs(rdp->mynode, rdp); in nocb_cb_wait()
963 * De-offloading. Clear our flag and notify the de-offload worker. in nocb_cb_wait()
965 * get re-offloaded. in nocb_cb_wait()
973 WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep); in nocb_cb_wait()
975 if (rdp->nocb_cb_sleep) in nocb_cb_wait()
976 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); in nocb_cb_wait()
983 swake_up_one(&rdp->nocb_state_wq); in nocb_cb_wait()
987 * Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke nocb_cb_wait() to do the real work.
1006 return READ_ONCE(rdp->nocb_defer_wakeup) >= level; in rcu_nocb_need_deferred_wakeup()
1013 __releases(rdp_gp->nocb_gp_lock) in do_nocb_deferred_wakeup_common()
1019 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in do_nocb_deferred_wakeup_common()
1023 ndw = rdp_gp->nocb_defer_wakeup; in do_nocb_deferred_wakeup_common()
1025 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); in do_nocb_deferred_wakeup_common()
1036 WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp); in do_nocb_deferred_wakeup_timer()
1037 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); in do_nocb_deferred_wakeup_timer()
1039 raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags); in do_nocb_deferred_wakeup_timer()
1046 * This means we do an inexact common-case check. Note that if
1047 * we miss, ->nocb_timer will eventually clean things up.
1052 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in do_nocb_deferred_wakeup()
1057 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in do_nocb_deferred_wakeup()
1069 __releases(rdp->nocb_lock) in rdp_offload_toggle()
1071 struct rcu_segcblist *cblist = &rdp->cblist; in rdp_offload_toggle()
1072 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in rdp_offload_toggle()
1077 if (rdp->nocb_cb_sleep) in rdp_offload_toggle()
1078 rdp->nocb_cb_sleep = false; in rdp_offload_toggle()
1085 swake_up_one(&rdp->nocb_cb_wq); in rdp_offload_toggle()
1087 raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); in rdp_offload_toggle()
1089 WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp); in rdp_offload_toggle()
1090 if (rdp_gp->nocb_gp_sleep) { in rdp_offload_toggle()
1091 rdp_gp->nocb_gp_sleep = false; in rdp_offload_toggle()
1094 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); in rdp_offload_toggle()
1102 struct rcu_segcblist *cblist = &rdp->cblist; in rcu_nocb_rdp_deoffload()
1105 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in rcu_nocb_rdp_deoffload()
1109 * rcuog/o[p] spawn failed, because at this time the rdp->cpu in rcu_nocb_rdp_deoffload()
1112 WARN_ON_ONCE((rdp->cpu != raw_smp_processor_id()) && cpu_online(rdp->cpu)); in rcu_nocb_rdp_deoffload()
1114 pr_info("De-offloading %d\n", rdp->cpu); in rcu_nocb_rdp_deoffload()
1119 * running on the target CPU holding ->nocb_lock (thus having in rcu_nocb_rdp_deoffload()
1138 mutex_lock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_nocb_rdp_deoffload()
1139 if (rdp_gp->nocb_gp_kthread) { in rcu_nocb_rdp_deoffload()
1141 wake_up_process(rdp_gp->nocb_gp_kthread); in rcu_nocb_rdp_deoffload()
1147 if (!rdp->nocb_cb_kthread) { in rcu_nocb_rdp_deoffload()
1149 rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB); in rcu_nocb_rdp_deoffload()
1153 swait_event_exclusive(rdp->nocb_state_wq, in rcu_nocb_rdp_deoffload()
1158 * No kthread to clear the flags for us or remove the rdp from the nocb list in rcu_nocb_rdp_deoffload()
1163 rcu_segcblist_clear_flags(&rdp->cblist, in rcu_nocb_rdp_deoffload()
1167 list_del(&rdp->nocb_entry_rdp); in rcu_nocb_rdp_deoffload()
1169 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_nocb_rdp_deoffload()
1185 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); in rcu_nocb_rdp_deoffload()
1188 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); in rcu_nocb_rdp_deoffload()
1207 pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu); in rcu_nocb_cpu_deoffload()
1208 ret = -EINVAL; in rcu_nocb_cpu_deoffload()
1221 struct rcu_segcblist *cblist = &rdp->cblist; in rcu_nocb_rdp_offload()
1224 struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; in rcu_nocb_rdp_offload()
1226 WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id()); in rcu_nocb_rdp_offload()
1228 * For now we only support re-offload, i.e. the rdp must have been offloaded on boot first. in rcu_nocb_rdp_offload()
1231 if (!rdp->nocb_gp_rdp) in rcu_nocb_rdp_offload()
1232 return -EINVAL; in rcu_nocb_rdp_offload()
1234 if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread)) in rcu_nocb_rdp_offload()
1235 return -EINVAL; in rcu_nocb_rdp_offload()
1237 pr_info("Offloading %d\n", rdp->cpu); in rcu_nocb_rdp_offload()
1243 raw_spin_lock_irqsave(&rdp->nocb_lock, flags); in rcu_nocb_rdp_offload()
1247 * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode). in rcu_nocb_rdp_offload()
1249 * rdp->cblist must be visible remotely by the nocb kthreads in rcu_nocb_rdp_offload()
1255 * ------------------------- ---------------------------- in rcu_nocb_rdp_offload()
1263 wake_up_process(rdp_gp->nocb_gp_kthread); in rcu_nocb_rdp_offload()
1264 swait_event_exclusive(rdp->nocb_state_wq, in rcu_nocb_rdp_offload()
1292 pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu); in rcu_nocb_cpu_offload()
1293 ret = -EINVAL; in rcu_nocb_cpu_offload()
1313 /* Protect rcu_nocb_mask against concurrent (de-)offloading. */ in lazy_rcu_shrink_count()
1321 count += READ_ONCE(rdp->lazy_len); in lazy_rcu_shrink_count()
1339 * Protect against concurrent (de-)offloading. Otherwise nocb locking may be ignored or imbalanced. in lazy_rcu_shrink_scan()
1360 if (!READ_ONCE(rdp->lazy_len)) in lazy_rcu_shrink_scan()
1369 _count = READ_ONCE(rdp->lazy_len); in lazy_rcu_shrink_scan()
1377 sc->nr_to_scan -= _count; in lazy_rcu_shrink_scan()
1379 if (sc->nr_to_scan <= 0) in lazy_rcu_shrink_scan()
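
The scan walks the CPUs, skips any with no lazy callbacks, flushes the rest, and stops once sc->nr_to_scan is spent. A toy model of that accounting, with a plain array standing in for the per-CPU data:

    #include <stdio.h>

    #define NR_CPUS 4

    static long lazy_len[NR_CPUS] = { 0, 5, 0, 12 };

    /* Flush per-CPU lazy callbacks until the scan budget is spent. */
    static long shrink_scan(long nr_to_scan)
    {
        long freed = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            long n = lazy_len[cpu];

            if (!n)                 /* nothing lazy here, skip */
                continue;
            lazy_len[cpu] = 0;      /* "flush the bypass list" */
            freed += n;
            nr_to_scan -= n;
            if (nr_to_scan <= 0)    /* budget exhausted */
                break;
        }
        return freed;
    }

    int main(void)
    {
        printf("freed %ld\n", shrink_scan(5));  /* stops after CPU 1 */
        return 0;
    }
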
1427 if (register_shrinker(&lazy_rcu_shrinker, "rcu-lazy")) in rcu_init_nohz()
1442 pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); in rcu_init_nohz()
1446 if (rcu_segcblist_empty(&rdp->cblist)) in rcu_init_nohz()
1447 rcu_segcblist_init(&rdp->cblist); in rcu_init_nohz()
1448 rcu_segcblist_offload(&rdp->cblist, true); in rcu_init_nohz()
1449 rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP); in rcu_init_nohz()
1450 rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE); in rcu_init_nohz()
1455 /* Initialize per-rcu_data variables for no-CBs CPUs. */
1458 init_swait_queue_head(&rdp->nocb_cb_wq); in rcu_boot_init_nocb_percpu_data()
1459 init_swait_queue_head(&rdp->nocb_gp_wq); in rcu_boot_init_nocb_percpu_data()
1460 init_swait_queue_head(&rdp->nocb_state_wq); in rcu_boot_init_nocb_percpu_data()
1461 raw_spin_lock_init(&rdp->nocb_lock); in rcu_boot_init_nocb_percpu_data()
1462 raw_spin_lock_init(&rdp->nocb_bypass_lock); in rcu_boot_init_nocb_percpu_data()
1463 raw_spin_lock_init(&rdp->nocb_gp_lock); in rcu_boot_init_nocb_percpu_data()
1464 timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); in rcu_boot_init_nocb_percpu_data()
1465 rcu_cblist_init(&rdp->nocb_bypass); in rcu_boot_init_nocb_percpu_data()
1466 WRITE_ONCE(rdp->lazy_len, 0); in rcu_boot_init_nocb_percpu_data()
1467 mutex_init(&rdp->nocb_gp_kthread_mutex); in rcu_boot_init_nocb_percpu_data()
1471 * If the specified CPU is a no-CBs CPU that does not already have its rcuo CB kthread, spawn it.
1486 if (rdp->nocb_cb_kthread) in rcu_spawn_cpu_nocb_kthread()
1491 rdp_gp = rdp->nocb_gp_rdp; in rcu_spawn_cpu_nocb_kthread()
1492 mutex_lock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_spawn_cpu_nocb_kthread()
1493 if (!rdp_gp->nocb_gp_kthread) { in rcu_spawn_cpu_nocb_kthread()
1495 "rcuog/%d", rdp_gp->cpu); in rcu_spawn_cpu_nocb_kthread()
1497 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_spawn_cpu_nocb_kthread()
1500 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t); in rcu_spawn_cpu_nocb_kthread()
1504 mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); in rcu_spawn_cpu_nocb_kthread()
1515 WRITE_ONCE(rdp->nocb_cb_kthread, t); in rcu_spawn_cpu_nocb_kthread()
1516 WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread); in rcu_spawn_cpu_nocb_kthread()
1527 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
1528 static int rcu_nocb_gp_stride = -1;
1532 * Initialize GP-CB relationships for all no-CBs CPUs.
1547 if (ls == -1) { in rcu_organize_nocb_kthreads()
1559 if (rdp->cpu >= nl) { in rcu_organize_nocb_kthreads()
1562 nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; in rcu_organize_nocb_kthreads()
1564 INIT_LIST_HEAD(&rdp->nocb_head_rdp); in rcu_organize_nocb_kthreads()
1571 pr_alert("%s: No-CB GP kthread CPU %d:", in rcu_organize_nocb_kthreads()
1580 rdp->nocb_gp_rdp = rdp_gp; in rcu_organize_nocb_kthreads()
1582 list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp); in rcu_organize_nocb_kthreads()
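
With the default stride of roughly sqrt(nr_cpu_ids), the DIV_ROUND_UP() step above assigns each contiguous block of ls CPUs to one rcuog leader. A runnable demo of that grouping for a hypothetical 16-CPU box:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int nr_cpu_ids = 16, ls = 4;    /* stride: e.g. sqrt(16) */
        int nl = 0, leader = -1;

        for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
            if (cpu >= nl) {            /* first CPU of a new group */
                nl = DIV_ROUND_UP(cpu + 1, ls) * ls;
                leader = cpu;
            }
            printf("CPU %2d -> rcuog/%d\n", cpu, leader);
        }
        return 0;
    }

This yields groups {0-3}, {4-7}, {8-11}, {12-15}, each led by its first CPU.
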
1595 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); in rcu_bind_current_to_nocb()
1599 // The ->on_cpu field is available only in CONFIG_SMP=y, so...
1603 return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : ""; in show_rcu_should_be_on_cpu()
1613 * Dump out nocb grace-period kthread state for the specified rcu_data structure.
1618 struct rcu_node *rnp = rdp->mynode; in show_rcu_nocb_gp_state()
1621 rdp->cpu, in show_rcu_nocb_gp_state()
1622 "kK"[!!rdp->nocb_gp_kthread], in show_rcu_nocb_gp_state()
1623 "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)], in show_rcu_nocb_gp_state()
1624 "dD"[!!rdp->nocb_defer_wakeup], in show_rcu_nocb_gp_state()
1625 "tT"[timer_pending(&rdp->nocb_timer)], in show_rcu_nocb_gp_state()
1626 "sS"[!!rdp->nocb_gp_sleep], in show_rcu_nocb_gp_state()
1627 ".W"[swait_active(&rdp->nocb_gp_wq)], in show_rcu_nocb_gp_state()
1628 ".W"[swait_active(&rnp->nocb_gp_wq[0])], in show_rcu_nocb_gp_state()
1629 ".W"[swait_active(&rnp->nocb_gp_wq[1])], in show_rcu_nocb_gp_state()
1630 ".B"[!!rdp->nocb_gp_bypass], in show_rcu_nocb_gp_state()
1631 ".G"[!!rdp->nocb_gp_gp], in show_rcu_nocb_gp_state()
1632 (long)rdp->nocb_gp_seq, in show_rcu_nocb_gp_state()
1633 rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops), in show_rcu_nocb_gp_state()
1634 rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.', in show_rcu_nocb_gp_state()
1635 rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, in show_rcu_nocb_gp_state()
1636 show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread)); in show_rcu_nocb_gp_state()
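
The state dump relies on the C idiom of indexing a two-character string literal with a normalized flag, e.g. "kK"[!!ptr]: lower case means the condition is false, upper case that it is true. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
        void *kthread = NULL;

        /* Index a two-character string with a 0/1 flag. */
        printf("%c\n", "kK"[!!kthread]);    /* prints 'k' */
        kthread = &kthread;
        printf("%c\n", "kK"[!!kthread]);    /* prints 'K' */
        return 0;
    }
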
1645 struct rcu_segcblist *rsclp = &rdp->cblist; in show_rcu_nocb_state()
1649 if (rdp->nocb_gp_rdp == rdp) in show_rcu_nocb_state()
1652 nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp, in show_rcu_nocb_state()
1653 &rdp->nocb_entry_rdp, in show_rcu_nocb_state()
1657 sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]); in show_rcu_nocb_state()
1658 sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]); in show_rcu_nocb_state()
1659 pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", in show_rcu_nocb_state()
1660 rdp->cpu, rdp->nocb_gp_rdp->cpu, in show_rcu_nocb_state()
1661 nocb_next_rdp ? nocb_next_rdp->cpu : -1, in show_rcu_nocb_state()
1662 "kK"[!!rdp->nocb_cb_kthread], in show_rcu_nocb_state()
1663 "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], in show_rcu_nocb_state()
1664 "lL"[raw_spin_is_locked(&rdp->nocb_lock)], in show_rcu_nocb_state()
1665 "sS"[!!rdp->nocb_cb_sleep], in show_rcu_nocb_state()
1666 ".W"[swait_active(&rdp->nocb_cb_wq)], in show_rcu_nocb_state()
1667 jiffies - rdp->nocb_bypass_first, in show_rcu_nocb_state()
1668 jiffies - rdp->nocb_nobypass_last, in show_rcu_nocb_state()
1669 rdp->nocb_nobypass_count, in show_rcu_nocb_state()
1676 ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)], in show_rcu_nocb_state()
1677 rcu_segcblist_n_cbs(&rdp->cblist), in show_rcu_nocb_state()
1678 rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.', in show_rcu_nocb_state()
1679 rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1, in show_rcu_nocb_state()
1680 show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread)); in show_rcu_nocb_state()
1683 if (rdp->nocb_gp_rdp == rdp) in show_rcu_nocb_state()
1686 waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock); in show_rcu_nocb_state()
1687 wassleep = swait_active(&rdp->nocb_gp_wq); in show_rcu_nocb_state()
1688 if (!rdp->nocb_gp_sleep && !waslocked && !wassleep) in show_rcu_nocb_state()
1691 pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n", in show_rcu_nocb_state()
1693 "dD"[!!rdp->nocb_defer_wakeup], in show_rcu_nocb_state()
1694 "sS"[!!rdp->nocb_gp_sleep], in show_rcu_nocb_state()
1710 /* No ->nocb_lock to acquire. */
1715 /* No ->nocb_lock to release. */
1720 /* No ->nocb_lock to release. */
1727 /* Lockdep check that ->cblist may be safely accessed. */
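
When CONFIG_RCU_NOCB_CPU=n, these trailing comments annotate empty stubs so that callers compile unchanged. A hedged sketch of their likely shape, consistent with the comments above (assumption: the usual kernel no-op-stub pattern, not copied from the file):

    /* With no offloading there is no ->nocb_lock to take or release,
     * so the wrappers compile away entirely. */
    static void rcu_nocb_lock(struct rcu_data *rdp)
    {
    }

    static void rcu_nocb_unlock(struct rcu_data *rdp)
    {
    }

    static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
                                           unsigned long flags)
    {
        local_irq_restore(flags);
    }
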