Lines Matching full:gp
50 unsigned long gp_seq_needed; /* Track furthest future GP request. */
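Both rcu_node (this line) and rcu_data (line 182 below) keep a gp_seq_needed field, recording the furthest-future grace period requested through that node or CPU, in the same sequence encoding as ->gp_seq. A minimal standalone sketch of that encoding, mirroring rcu_seq_snap()/rcu_seq_done() from kernel/rcu/rcu.h (memory barriers and wraparound handling omitted):

    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

    /* Low-order bits hold the phase of the current GP; upper bits count GPs. */
    static unsigned long rcu_seq_state(unsigned long s)
    {
            return s & RCU_SEQ_STATE_MASK;
    }

    /* Smallest ->gp_seq value at which a GP requested now will have ended;
     * this is the value that gets recorded into gp_seq_needed. */
    static unsigned long rcu_seq_snap(unsigned long s)
    {
            return (s + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
    }

    /* Has the GP identified by snapshot s completed? */
    static int rcu_seq_done(unsigned long cur, unsigned long s)
    {
            return cur >= s;        /* The kernel uses ULONG_CMP_GE() here. */
    }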
58 unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
60 /* Per-GP initial value for qsmask. */
65 /* to allow the current expedited GP to complete. */
68 /* Per-GP initial values for expmask. */
70 /* Initialized from ->expmaskinitnext at the beginning of each expedited GP. */
72 /* Online CPUs for next expedited GP. */
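The three expedited masks above cooperate at expedited-GP start: expmaskinitnext accumulates newly onlined CPUs, expmaskinit is refreshed from it, and expmask is then reset from expmaskinit for the new GP. A condensed sketch patterned on sync_exp_reset_tree() in kernel/rcu/tree_exp.h:

    static void sync_exp_reset_tree(void)
    {
            unsigned long flags;
            struct rcu_node *rnp;

            sync_exp_reset_tree_hotplug();  /* Fold expmaskinitnext into expmaskinit. */
            rcu_for_each_node_breadth_first(rnp) {
                    raw_spin_lock_irqsave_rcu_node(rnp, flags);
                    WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
                    raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
            }
    }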
127 /* Place for rcu_nocb_kthread() to wait for GP. */
182 unsigned long gp_seq_needed; /* Track furthest future GP request. */
210 int dynticks_snap; /* Per-GP tracking for dynticks. */
211 bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
212 bool rcu_urgent_qs; /* GP old, need light quiescent state. */
214 bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */
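These per-CPU flags let the grace-period machinery escalate pressure on a holdout CPU as a GP ages: rcu_urgent_qs asks the next scheduler tick for a cheap quiescent-state report, rcu_need_heavy_qs demands a heavyweight one, and rcu_forced_tick_exp keeps the tick alive until an expedited GP gets its QS. An illustrative sketch only (the real escalation, with its actual thresholds, lives in rcu_implicit_dynticks_qs() in kernel/rcu/tree.c):

    /* Illustrative: nudge a CPU that has not yet reported a QS for an aging GP.
     * The thresholds below are made up for clarity. */
    static void nudge_holdout_cpu(struct rcu_data *rdp, unsigned long gp_age_jiffies)
    {
            if (gp_age_jiffies > jiffies_to_sched_qs)         /* GP getting old. */
                    WRITE_ONCE(rdp->rcu_urgent_qs, true);     /* Ask for light QS. */
            if (gp_age_jiffies > 2 * jiffies_to_sched_qs)     /* GP really old. */
                    WRITE_ONCE(rdp->rcu_need_heavy_qs, true); /* Demand heavy QS. */
    }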
230 struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread spawning. */
240 /* The following fields are used by GP kthread, hence own cacheline. */
242 u8 nocb_gp_sleep; /* Is the nocb GP thread asleep? */
244 u8 nocb_gp_gp; /* GP to wait for on last scan? */
259 /* GP rdp takes GP-end wakeups. */
340 unsigned long gp_max; /* Maximum GP duration in jiffies. */
343 struct swait_queue_head gp_wq; /* Where GP task waits. */
344 short gp_flags; /* Commands for GP task. */
345 short gp_state; /* GP kthread sleep state. */
346 unsigned long gp_wake_time; /* Last GP kthread wake. */
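gp_flags, gp_wq, and gp_state form the GP kthread's control interface: requesters set a flag bit (tree.h defines RCU_GP_FLAG_INIT and RCU_GP_FLAG_FQS for this) and wake gp_wq, while the kthread publishes its progress in gp_state for stall diagnostics. A condensed sketch of the kthread's main loop, after rcu_gp_kthread() in kernel/rcu/tree.c:

    for (;;) {
            /* Sleep until somebody sets RCU_GP_FLAG_INIT and wakes gp_wq. */
            WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
            swait_event_idle_exclusive(rcu_state.gp_wq,
                                       READ_ONCE(rcu_state.gp_flags) &
                                       RCU_GP_FLAG_INIT);
            WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
            if (!rcu_gp_init())             /* Start the new grace period. */
                    continue;               /* Spurious wakeup; sleep again. */
            rcu_gp_fqs_loop();              /* Force quiescent states as needed. */
            WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
            rcu_gp_cleanup();               /* Mark the grace period ended. */
            WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
    }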
348 unsigned long gp_seq_polled; /* GP seq for polled API. */
349 unsigned long gp_seq_polled_snap; /* ->gp_seq_polled at normal GP start. */
350 unsigned long gp_seq_polled_exp_snap; /* ->gp_seq_polled at expedited GP start. */
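gp_seq_polled and its two snapshots back RCU's polled grace-period API, which lets callers capture GP state and test it later without blocking. A hypothetical caller using only the exported interface from include/linux/rcupdate.h:

    #include <linux/rcupdate.h>

    struct deferred_free {                  /* Hypothetical example structure. */
            unsigned long gp_cookie;        /* GP state snapshot at queue time. */
            void *payload;
    };

    static void queue_deferred_free(struct deferred_free *df)
    {
            /* Snapshot the current GP state, starting a new GP if needed. */
            df->gp_cookie = start_poll_synchronize_rcu();
    }

    static bool can_free_now(struct deferred_free *df)
    {
            /* True once a full grace period has elapsed since the snapshot. */
            return poll_state_synchronize_rcu(df->gp_cookie);
    }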
363 struct mutex exp_mutex; /* Serialize expedited GP. */
378 unsigned long gp_start; /* Time at which GP started, in jiffies. */
380 unsigned long gp_end; /* Time last GP ended, in jiffies. */
382 unsigned long gp_activity; /* Time of last GP kthread activity, in jiffies. */
384 unsigned long gp_req_activity; /* Time of last GP request, in jiffies. */
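These four fields are plain jiffies bookkeeping; grace-period cleanup computes the just-ended GP's duration and tracks the running maximum, roughly as in rcu_gp_cleanup() in kernel/rcu/tree.c:

    unsigned long gp_duration;

    rcu_state.gp_end = jiffies;
    gp_duration = rcu_state.gp_end - rcu_state.gp_start;
    if (gp_duration > rcu_state.gp_max)
            rcu_state.gp_max = gp_duration;     /* New longest GP on record. */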
395 /* Snapshot of n_force_qs at GP start. */
401 /* Synchronize offline with GP pre-initialization. */
411 #define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */
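RCU_GP_IDLE is the lowest of the gp_state values published by the GP kthread. For context, the neighboring definitions in the same header enumerate the rest of its sleep/progress states (values as in recent kernels):

    #define RCU_GP_WAIT_GPS  1      /* Wait for grace-period start. */
    #define RCU_GP_DONE_GPS  2      /* Wait done for grace-period start. */
    #define RCU_GP_ONOFF     3      /* Grace-period initialization hotplug. */
    #define RCU_GP_INIT      4      /* Grace-period initialization. */
    #define RCU_GP_WAIT_FQS  5      /* Wait for force-quiescent-state time. */
    #define RCU_GP_DOING_FQS 6      /* Wait done for force-quiescent-state time. */
    #define RCU_GP_CLEANUP   7      /* Grace-period cleanup started. */
    #define RCU_GP_CLEANED   8      /* Grace-period cleanup complete. */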