
Searched refs:rnp (Results 1 – 9 of 9) sorted by relevance

/openbmc/linux/kernel/rcu/
tree_exp.h
13 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
14 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);
83 struct rcu_node *rnp; in sync_exp_reset_tree_hotplug() local
95 rcu_for_each_leaf_node(rnp) { in sync_exp_reset_tree_hotplug()
96 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
97 if (rnp->expmaskinit == rnp->expmaskinitnext) { in sync_exp_reset_tree_hotplug()
98 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
103 oldmask = rnp->expmaskinit; in sync_exp_reset_tree_hotplug()
104 rnp->expmaskinit = rnp->expmaskinitnext; in sync_exp_reset_tree_hotplug()
105 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
[all …]
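The sync_exp_reset_tree_hotplug() lines above show a recurring shape in this directory: walk each leaf rcu_node, take its lock, skip the node if ->expmaskinit already matches ->expmaskinitnext, otherwise adopt the new mask. A minimal userspace sketch of that compare-and-copy loop, using a pthread mutex and a hypothetical struct leaf in place of struct rcu_node and the raw_spin_lock_irqsave_rcu_node() wrappers:

#include <pthread.h>

/* Hypothetical stand-in for the rcu_node fields touched in the excerpt. */
struct leaf {
	pthread_mutex_t lock;
	unsigned long expmaskinit;     /* mask used by the last expedited GP */
	unsigned long expmaskinitnext; /* mask kept up to date at CPU hotplug */
};

/* Walk every leaf, skip unchanged ones, otherwise pick up the new mask. */
static void reset_leaf_exp_masks(struct leaf *leaves, int nleaves)
{
	for (int i = 0; i < nleaves; i++) {
		struct leaf *lp = &leaves[i];

		pthread_mutex_lock(&lp->lock);
		if (lp->expmaskinit == lp->expmaskinitnext) {
			pthread_mutex_unlock(&lp->lock); /* nothing to do */
			continue;
		}
		lp->expmaskinit = lp->expmaskinitnext;  /* adopt hotplug updates */
		pthread_mutex_unlock(&lp->lock);
	}
}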
tree_plugin.h
105 static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
151 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_preempt_ctxt_queue() argument
152 __releases(rnp->lock) /* But leaves rrupts disabled. */ in rcu_preempt_ctxt_queue()
154 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
155 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
156 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
157 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
160 raw_lockdep_assert_held_rcu_node(rnp); in rcu_preempt_ctxt_queue()
161 WARN_ON_ONCE(rdp->mynode != rnp); in rcu_preempt_ctxt_queue()
162 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); in rcu_preempt_ctxt_queue()
[all …]
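rcu_preempt_ctxt_queue() classifies a newly blocked task by OR-ing together up to four state bits, and that combination decides where the task lands on the node's blocked-tasks list. A rough illustration of the flag composition only, with made-up flag values and simplified fields:

#define RCU_GP_TASKS  0x1 /* node already has tasks blocking the normal GP */
#define RCU_EXP_TASKS 0x2 /* node already has tasks blocking the expedited GP */
#define RCU_GP_BLKD   0x4 /* this CPU still owes the normal GP a quiescent state */
#define RCU_EXP_BLKD  0x8 /* this CPU still owes the expedited GP a quiescent state */

struct node_state {
	void *gp_tasks, *exp_tasks;     /* list pointers, NULL when empty */
	unsigned long qsmask, expmask;  /* CPUs still blocking each GP type */
};

static int classify_blocked_task(const struct node_state *np, unsigned long grpmask)
{
	return (np->gp_tasks ? RCU_GP_TASKS : 0) +
	       (np->exp_tasks ? RCU_EXP_TASKS : 0) +
	       ((np->qsmask & grpmask) ? RCU_GP_BLKD : 0) +
	       ((np->expmask & grpmask) ? RCU_EXP_BLKD : 0);
}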
tree.c
146 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
148 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
152 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
731 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
733 raw_lockdep_assert_held_rcu_node(rnp); in rcu_gpnum_ovf()
735 rnp->gp_seq)) in rcu_gpnum_ovf()
737 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
738 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
771 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs() local
783 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
[all …]
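rcu_gpnum_ovf() guards against ->gp_seq wrapping by comparing with ULONG_CMP_LT(), which orders unsigned counters modulo ULONG_MAX + 1 instead of by raw value. A small standalone demo of that wrap-tolerant comparison (the macro body here follows the kernel's rcupdate.h definition, repeated only so the example compiles on its own):

#include <limits.h>
#include <stdio.h>

/* Wrap-tolerant "(a) < (b)" for unsigned sequence counters. */
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long older = ULONG_MAX - 5; /* just before the wrap */
	unsigned long newer = 5;             /* just after the wrap */

	printf("raw compare says newer < older: %d\n", newer < older);  /* 1, misleading */
	printf("modular compare says older < newer: %d\n",
	       ULONG_CMP_LT(older, newer));                             /* 1, correct */
	return 0;
}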
tree_stall.h
221 struct rcu_node *rnp; in rcu_iw_handler() local
224 rnp = rdp->mynode; in rcu_iw_handler()
225 raw_spin_lock_rcu_node(rnp); in rcu_iw_handler()
227 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_iw_handler()
230 raw_spin_unlock_rcu_node(rnp); in rcu_iw_handler()
243 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) in rcu_print_detail_task_stall_rnp() argument
248 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_print_detail_task_stall_rnp()
249 if (!rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_print_detail_task_stall_rnp()
250 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_print_detail_task_stall_rnp()
253 t = list_entry(rnp->gp_tasks->prev, in rcu_print_detail_task_stall_rnp()
[all …]
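rcu_print_detail_task_stall_rnp() has the same lock-check-bail shape, and when readers are blocking the grace period it starts from the ->gp_tasks position and walks the rest of the node's blocked-tasks list. A stripped-down sketch of that walk, with a plain doubly linked list and a container_of-style lookup standing in for the kernel's list_head helpers:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct task {
	struct list_head node_entry; /* links the task on its leaf's list */
	int pid;
};

/* Recover the enclosing struct from a list pointer, as list_entry() does. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Print every task from gp_tasks to the end of the list (head is the sentinel). */
static void print_blocked_tasks(struct list_head *head, struct list_head *gp_tasks)
{
	if (!gp_tasks)  /* no readers blocking the current grace period */
		return;

	for (struct list_head *p = gp_tasks; p != head; p = p->next)
		printf("stalled by pid %d\n", list_entry(p, struct task, node_entry)->pid);
}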
rcu.h
378 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1) argument
381 #define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1]) argument
388 #define _rcu_for_each_node_breadth_first(sp, rnp) \ argument
389 for ((rnp) = &(sp)->node[0]; \
390 (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
391 #define rcu_for_each_node_breadth_first(rnp) \ argument
392 _rcu_for_each_node_breadth_first(&rcu_state, rnp)
393 #define srcu_for_each_node_breadth_first(ssp, rnp) \ argument
394 _rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)
402 #define rcu_for_each_leaf_node(rnp) \ argument
[all …]
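The rcu.h macros above make the tree traversal explicit: the rcu_node structures live in one array, laid out level by level with the root first, so walking the array in index order is a breadth-first walk and the leaves are simply the tail of the array. A tiny mock with a hypothetical three-node, two-level tree:

#include <stdio.h>

struct node { int level; };

#define NUM_NODES 3
#define NUM_LVLS  2

/* Root first, then its two leaves: array order is breadth-first order. */
static struct node nodes[NUM_NODES] = { { .level = 0 }, { .level = 1 }, { .level = 1 } };

#define for_each_node_breadth_first(np) \
	for ((np) = &nodes[0]; (np) < &nodes[NUM_NODES]; (np)++)

#define is_leaf_node(np) ((np)->level == NUM_LVLS - 1)

int main(void)
{
	struct node *np;

	for_each_node_breadth_first(np)
		printf("node %ld: %s\n", (long)(np - nodes),
		       is_leaf_node(np) ? "leaf" : "interior");
	return 0;
}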
tree.h
147 #define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo)) argument
447 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
449 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
451 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
452 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
454 static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
455 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
456 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
459 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
460 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
[all …]
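leaf_node_cpu_bit() in tree.h maps a CPU number to its bit inside the owning leaf's masks by subtracting the lowest CPU the leaf covers (->grplo). A two-line stand-in with hypothetical names and a compile-time check:

#define BIT(n)                       (1UL << (n))
#define cpu_bit_in_leaf(grplo, cpu)  BIT((cpu) - (grplo))

/* A leaf covering CPUs 16..31: CPU 19 occupies bit 3 of the leaf's masks. */
_Static_assert(cpu_bit_in_leaf(16, 19) == 0x8UL, "CPU 19 -> bit 3");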
tree_nocb.h
186 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) in rcu_nocb_gp_get() argument
188 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; in rcu_nocb_gp_get()
191 static void rcu_init_one_nocb(struct rcu_node *rnp) in rcu_init_one_nocb() argument
193 init_swait_queue_head(&rnp->nocb_gp_wq[0]); in rcu_init_one_nocb()
194 init_swait_queue_head(&rnp->nocb_gp_wq[1]); in rcu_init_one_nocb()
678 struct rcu_node *rnp; in nocb_gp_wait() local
741 rnp = rdp->mynode; in nocb_gp_wait()
748 rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) { in nocb_gp_wait()
749 raw_spin_lock_rcu_node(rnp); /* irqs disabled. */ in nocb_gp_wait()
750 needwake_gp = rcu_advance_cbs(rnp, rdp); in nocb_gp_wait()
[all …]
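rcu_nocb_gp_get() alternates between the leaf's two no-CBs wait queues, indexed by the low-order bit of the grace-period counter portion of ->gp_seq, so waiters on consecutive grace periods never share a queue. A sketch of that two-slot selection, assuming the kernel's convention that the bottom few bits of gp_seq carry state and the remaining bits count grace periods:

#include <stdio.h>

#define SEQ_CTR_SHIFT 2 /* assumed: low bits hold GP state, upper bits count GPs */

struct gp_leaf {
	int wq[2];           /* stand-ins for the two nocb_gp_wq[] wait queues */
	unsigned long gp_seq;
};

/* Select the wait queue matching the parity of the current GP number. */
static int *gp_wait_queue(struct gp_leaf *lp)
{
	return &lp->wq[(lp->gp_seq >> SEQ_CTR_SHIFT) & 0x1];
}

int main(void)
{
	struct gp_leaf leaf = { .gp_seq = 3UL << SEQ_CTR_SHIFT }; /* GP #3 */

	printf("GP 3 uses queue %ld\n", (long)(gp_wait_queue(&leaf) - leaf.wq));
	return 0;
}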
/openbmc/linux/Documentation/RCU/Design/Memory-Ordering/
Tree-RCU-Memory-Ordering.rst
84 5 raw_spin_lock_rcu_node(rnp);
87 8 raw_spin_unlock_rcu_node(rnp);
92 13 raw_spin_lock_rcu_node(rnp);
95 16 raw_spin_unlock_rcu_node(rnp);
206 5 struct rcu_node *rnp;
232 31 rnp = rdp->mynode;
233 32 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
234 33 needwake = rcu_accelerate_cbs(rnp, rdp);
235 34 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
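The Tree-RCU-Memory-Ordering.rst lines quote code that brackets grace-period bookkeeping with raw_spin_lock_rcu_node() / raw_spin_unlock_rcu_node(); the document's point is that this lock acquisition also supplies full memory-barrier semantics when paired with the previous holder's release. A rough userspace analogy, not the kernel's implementation, using a pthread mutex plus an explicit fence:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Acquire the node lock, then add a full fence so the acquisition pairs with
 * the previous critical section as a full barrier, the role played by
 * smp_mb__after_unlock_lock() inside raw_spin_lock_rcu_node().
 */
static void mock_lock_rcu_node(void)
{
	pthread_mutex_lock(&node_lock);
	atomic_thread_fence(memory_order_seq_cst);
}

static void mock_unlock_rcu_node(void)
{
	pthread_mutex_unlock(&node_lock);
}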
/openbmc/linux/Documentation/RCU/Design/Data-Structures/
Data-Structures.rst
1106 6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
1107 7 for ((rnp) = &(rsp)->node[0]; \
1108 8 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
1110 10 #define rcu_for_each_leaf_node(rsp, rnp) \
1111 11 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
1112 12 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)