Lines Matching refs:rdp

150 static void rcu_report_exp_rdp(struct rcu_data *rdp);
152 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
153 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
154 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
236 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu() local
238 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
239 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
291 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) in rcu_dynticks_in_eqs_since() argument
293 return snap != rcu_dynticks_snap(rdp->cpu); in rcu_dynticks_in_eqs_since()
476 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
549 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched() local
558 if (do_nocb_deferred_wakeup(rdp) && need_resched()) { in rcu_irq_work_resched()
612 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick() local
621 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
622 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
623 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
635 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
636 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
639 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
640 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
642 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
669 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs() argument
671 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
672 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
673 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
674 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
675 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
676 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
731 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
734 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
736 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
737 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
738 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
746 static int dyntick_save_progress_counter(struct rcu_data *rdp) in dyntick_save_progress_counter() argument
748 rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu); in dyntick_save_progress_counter()
749 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { in dyntick_save_progress_counter()
750 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
751 rcu_gpnum_ovf(rdp->mynode, rdp); in dyntick_save_progress_counter()
767 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) in rcu_implicit_dynticks_qs() argument
771 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs()
781 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { in rcu_implicit_dynticks_qs()
782 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
783 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
805 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) { in rcu_implicit_dynticks_qs()
815 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], in rcu_implicit_dynticks_qs()
816 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in rcu_implicit_dynticks_qs()
817 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in rcu_implicit_dynticks_qs()
833 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && in rcu_implicit_dynticks_qs()
837 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); in rcu_implicit_dynticks_qs()
839 smp_store_release(&rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
841 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
852 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_implicit_dynticks_qs()
853 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_implicit_dynticks_qs()
855 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_implicit_dynticks_qs()
856 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
869 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_implicit_dynticks_qs()
870 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_implicit_dynticks_qs()
874 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
875 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
876 rdp->rcu_iw_pending = true; in rcu_implicit_dynticks_qs()
877 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
878 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_implicit_dynticks_qs()
881 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { in rcu_implicit_dynticks_qs()
882 int cpu = rdp->cpu; in rcu_implicit_dynticks_qs()
888 rsrp = &rdp->snap_record; in rcu_implicit_dynticks_qs()
892 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu); in rcu_implicit_dynticks_qs()
893 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu); in rcu_implicit_dynticks_qs()
894 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu); in rcu_implicit_dynticks_qs()
896 rsrp->gp_seq = rdp->gp_seq; in rcu_implicit_dynticks_qs()
904 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
928 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp() argument
944 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); in rcu_start_this_gp()
952 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
964 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, in rcu_start_this_gp()
976 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
979 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
983 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
992 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1006 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup() local
1011 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1087 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1092 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs()
1096 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1099 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1112 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1113 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1116 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1121 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1134 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked() argument
1139 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs_unlocked()
1141 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1143 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1147 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1163 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1165 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs()
1169 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1176 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1179 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1187 struct rcu_data *rdp) in rcu_advance_cbs_nowake() argument
1189 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs_nowake()
1194 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
1217 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1221 const bool offloaded = rcu_rdp_is_offloaded(rdp); in __note_gp_changes()
1225 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1229 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1230 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1232 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1233 rdp->core_needs_qs = false; in __note_gp_changes()
1234 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1237 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1238 if (rdp->core_needs_qs) in __note_gp_changes()
1239 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1243 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1244 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1251 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1252 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1253 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1254 zero_cpu_stall_ticks(rdp); in __note_gp_changes()
1256 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1257 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1258 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1259 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap)) in __note_gp_changes()
1260 WRITE_ONCE(rdp->last_sched_clock, jiffies); in __note_gp_changes()
1261 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1262 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1266 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes() argument
1273 rnp = rdp->mynode; in note_gp_changes()
1274 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1275 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1280 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
1433 struct rcu_data *rdp; in rcu_gp_init() local
1540 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1544 if (rnp == rdp->mynode) in rcu_gp_init()
1545 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
1725 struct rcu_data *rdp; in rcu_gp_cleanup() local
1766 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
1767 if (rnp == rdp->mynode) in rcu_gp_cleanup()
1768 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
1774 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
1775 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
1793 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
1795 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
1800 offloaded = rcu_rdp_is_offloaded(rdp); in rcu_gp_cleanup()
1801 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
2009 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp() argument
2016 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2017 rnp = rdp->mynode; in rcu_report_qs_rdp()
2019 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2020 rdp->gpwrap) { in rcu_report_qs_rdp()
2028 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2032 mask = rdp->grpmask; in rcu_report_qs_rdp()
2033 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2043 if (!rcu_rdp_is_offloaded(rdp)) { in rcu_report_qs_rdp()
2049 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp)); in rcu_report_qs_rdp()
2050 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) { in rcu_report_qs_rdp()
2058 rcu_disable_urgency_upon_qs(rdp); in rcu_report_qs_rdp()
2063 rcu_nocb_lock_irqsave(rdp, flags); in rcu_report_qs_rdp()
2064 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_report_qs_rdp()
2065 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_report_qs_rdp()
2077 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2080 note_gp_changes(rdp); in rcu_check_quiescent_state()
2086 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2093 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2100 rcu_report_qs_rdp(rdp); in rcu_check_quiescent_state()
2119 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch() argument
2134 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2136 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2138 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2140 rcu_is_callbacks_kthread(rdp)); in rcu_do_batch()
2149 rcu_nocb_lock_irqsave(rdp, flags); in rcu_do_batch()
2151 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); in rcu_do_batch()
2154 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2155 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && in rcu_do_batch()
2166 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2167 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2168 if (rcu_rdp_is_offloaded(rdp)) in rcu_do_batch()
2169 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2171 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
2172 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2215 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && in rcu_do_batch()
2217 rdp->rcu_cpu_has_work = 1; in rcu_do_batch()
2223 rcu_nocb_lock_irqsave(rdp, flags); in rcu_do_batch()
2224 rdp->n_cbs_invoked += count; in rcu_do_batch()
2226 is_idle_task(current), rcu_is_callbacks_kthread(rdp)); in rcu_do_batch()
2229 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2230 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2233 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2234 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2235 rdp->blimit = blimit; in rcu_do_batch()
2238 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2239 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2240 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2241 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2242 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2248 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2252 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2253 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2255 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2306 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp() argument
2336 struct rcu_data *rdp; in force_qs_rnp() local
2339 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2340 ret = f(rdp); in force_qs_rnp()
2342 mask |= rdp->grpmask; in force_qs_rnp()
2343 rcu_disable_urgency_upon_qs(rdp); in force_qs_rnp()
2346 rsmask |= rdp->grpmask; in force_qs_rnp()
2411 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core() local
2412 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2430 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); in rcu_core()
2435 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2446 rcu_check_quiescent_state(rdp); in rcu_core()
2450 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) { in rcu_core()
2451 rcu_nocb_lock_irqsave(rdp, flags); in rcu_core()
2452 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2453 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2454 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_core()
2457 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2460 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2462 rcu_do_batch(rdp); in rcu_core()
2464 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_core()
2469 do_nocb_deferred_wakeup(rdp); in rcu_core()
2474 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2594 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in __call_rcu_core() argument
2615 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in __call_rcu_core()
2616 rdp->qlen_last_fqs_check + qhimark)) { in __call_rcu_core()
2619 note_gp_changes(rdp); in __call_rcu_core()
2623 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in __call_rcu_core()
2626 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in __call_rcu_core()
2627 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in __call_rcu_core()
2628 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in __call_rcu_core()
2630 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in __call_rcu_core()
2631 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in __call_rcu_core()
2649 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
2654 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
2655 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
2657 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
2672 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld() argument
2674 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
2677 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
2678 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
2681 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
2691 struct rcu_data *rdp; in __call_rcu_common() local
2714 rdp = this_cpu_ptr(&rcu_data); in __call_rcu_common()
2718 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu_common()
2724 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu_common()
2725 rcu_segcblist_init(&rdp->cblist); in __call_rcu_common()
2728 check_cb_ovld(rdp); in __call_rcu_common()
2729 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) in __call_rcu_common()
2732 rcu_segcblist_enqueue(&rdp->cblist, head); in __call_rcu_common()
2736 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu_common()
2739 rcu_segcblist_n_cbs(&rdp->cblist)); in __call_rcu_common()
2741 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in __call_rcu_common()
2744 if (unlikely(rcu_rdp_is_offloaded(rdp))) { in __call_rcu_common()
2745 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ in __call_rcu_common()
2747 __call_rcu_core(rdp, head, flags); in __call_rcu_common()
3705 struct rcu_data *rdp; in start_poll_synchronize_rcu_common() local
3710 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu_common()
3711 rnp = rdp->mynode; in start_poll_synchronize_rcu_common()
3719 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq)); in start_poll_synchronize_rcu_common()
3923 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending() local
3924 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
3929 check_cpu_stall(rdp); in rcu_pending()
3932 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) in rcu_pending()
3941 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
3945 if (!rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3946 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
3950 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
3951 !rcu_rdp_is_offloaded(rdp) && in rcu_pending()
3952 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
3956 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
3957 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
3999 static void rcu_barrier_entrain(struct rcu_data *rdp) in rcu_barrier_entrain() argument
4002 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); in rcu_barrier_entrain()
4010 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_entrain()
4011 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_entrain()
4012 rcu_nocb_lock(rdp); in rcu_barrier_entrain()
4018 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
4019 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); in rcu_barrier_entrain()
4020 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
4021 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_entrain()
4024 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_entrain()
4027 rcu_nocb_unlock(rdp); in rcu_barrier_entrain()
4029 wake_nocb_gp(rdp, false); in rcu_barrier_entrain()
4030 smp_store_release(&rdp->barrier_seq_snap, gseq); in rcu_barrier_entrain()
4039 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_handler() local
4042 WARN_ON_ONCE(cpu != rdp->cpu); in rcu_barrier_handler()
4045 rcu_barrier_entrain(rdp); in rcu_barrier_handler()
4062 struct rcu_data *rdp; in rcu_barrier() local
4101 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4103 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) in rcu_barrier()
4106 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { in rcu_barrier()
4107 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
4112 if (!rcu_rdp_cpu_online(rdp)) { in rcu_barrier()
4113 rcu_barrier_entrain(rdp); in rcu_barrier()
4114 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
4124 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
4143 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4145 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
4169 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) in rcu_rdp_cpu_online() argument
4171 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); in rcu_rdp_cpu_online()
4176 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_online() local
4178 return rcu_rdp_cpu_online(rdp); in rcu_cpu_online()
4198 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online() local
4204 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
4212 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) in rcu_lockdep_current_cpu_online()
4235 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dying_cpu() local
4236 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
4241 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); in rcutree_dying_cpu()
4344 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data() local
4347 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4348 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4351 rdp->barrier_seq_snap = rcu_state.barrier_sequence; in rcu_boot_init_percpu_data()
4352 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4353 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4354 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4355 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4356 rdp->last_sched_clock = jiffies; in rcu_boot_init_percpu_data()
4357 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4358 rcu_boot_init_nocb_percpu_data(rdp); in rcu_boot_init_percpu_data()
4375 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu() local
4380 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4381 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4382 rdp->blimit = blimit; in rcutree_prepare_cpu()
4390 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4391 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4398 rnp = rdp->mynode; in rcutree_prepare_cpu()
4400 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4401 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4402 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4403 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4404 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4405 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
4406 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
4407 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4421 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting() local
4423 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); in rcutree_affinity_setting()
4431 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_beenfullyonline() local
4433 return smp_load_acquire(&rdp->beenonline); in rcu_cpu_beenfullyonline()
4443 struct rcu_data *rdp; in rcutree_online_cpu() local
4446 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
4447 rnp = rdp->mynode; in rcutree_online_cpu()
4449 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4468 struct rcu_data *rdp; in rcutree_offline_cpu() local
4471 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
4472 rnp = rdp->mynode; in rcutree_offline_cpu()
4474 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4499 struct rcu_data *rdp; in rcu_cpu_starting() local
4504 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_starting()
4505 if (rdp->cpu_started) in rcu_cpu_starting()
4507 rdp->cpu_started = true; in rcu_cpu_starting()
4509 rnp = rdp->mynode; in rcu_cpu_starting()
4510 mask = rdp->grpmask; in rcu_cpu_starting()
4522 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
4523 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_cpu_starting()
4524 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_cpu_starting()
4532 rcu_disable_urgency_upon_qs(rdp); in rcu_cpu_starting()
4539 smp_store_release(&rdp->beenonline, true); in rcu_cpu_starting()
4555 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_report_dead() local
4556 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead()
4559 do_nocb_deferred_wakeup(rdp); in rcu_report_dead()
4564 mask = rdp->grpmask; in rcu_report_dead()
4568 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_report_dead()
4569 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_report_dead()
4572 rcu_disable_urgency_upon_qs(rdp); in rcu_report_dead()
4581 rdp->cpu_started = false; in rcu_report_dead()
4595 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks() local
4598 if (rcu_rdp_is_offloaded(rdp) || in rcutree_migrate_callbacks()
4599 rcu_segcblist_empty(&rdp->cblist)) in rcutree_migrate_callbacks()
4603 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); in rcutree_migrate_callbacks()
4604 rcu_barrier_entrain(rdp); in rcutree_migrate_callbacks()
4611 needwake = rcu_advance_cbs(my_rnp, rdp) || in rcutree_migrate_callbacks()
4613 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
4616 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
4629 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
4630 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
4632 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
4633 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()
4718 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_spawn_gp_kthread() local
4743 rcu_spawn_one_boost_kthread(rdp->mynode); in rcu_spawn_gp_kthread()
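
For orientation, the pattern that recurs throughout the listing above is a per-CPU lookup of struct rcu_data (rdp) followed by work on its fields: this_cpu_ptr(&rcu_data) for the running CPU, per_cpu_ptr(&rcu_data, cpu) for a specific CPU. The sketch below mirrors the rcu_get_n_cbs_cpu() lines shown earlier (236-239); it assumes the kernel-internal declarations from the RCU tree implementation (struct rcu_data, the rcu_data per-CPU variable, and the rcu_segcblist helpers), and the names example_n_cbs()/example_this_rdp() are hypothetical, so this is illustrative rather than a standalone, buildable unit.

/* Illustrative only: kernel context and the RCU tree declarations are assumed. */
#include <linux/percpu.h>

/* Count another CPU's queued callbacks, as rcu_get_n_cbs_cpu() does above. */
static unsigned long example_n_cbs(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);	/* a specific CPU's rcu_data */

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

/* Current-CPU variant, as used in rcu_irq_work_resched() and __call_rcu_common(). */
static struct rcu_data *example_this_rdp(void)
{
	return this_cpu_ptr(&rcu_data);	/* the running CPU's rcu_data */
}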