xref: /openbmc/linux/kernel/rcu/tree_stall.h (revision 858c0840)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * RCU CPU stall warnings for normal RCU grace periods
4  *
5  * Copyright IBM Corporation, 2019
6  *
7  * Author: Paul E. McKenney <paulmck@linux.ibm.com>
8  */
9 
10 #include <linux/kvm_para.h>
11 
12 //////////////////////////////////////////////////////////////////////////////
13 //
14 // Controlling CPU stall warnings, including delay calculation.
15 
16 /* panic() on RCU Stall sysctl. */
17 int sysctl_panic_on_rcu_stall __read_mostly;
18 int sysctl_max_rcu_stall_to_panic __read_mostly;
19 
20 #ifdef CONFIG_PROVE_RCU
21 #define RCU_STALL_DELAY_DELTA		(5 * HZ)
22 #else
23 #define RCU_STALL_DELAY_DELTA		0
24 #endif
25 #define RCU_STALL_MIGHT_DIV		8
26 #define RCU_STALL_MIGHT_MIN		(2 * HZ)
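// RCU_STALL_MIGHT_DIV divides the normal stall timeout to obtain the
// "grace period might be stalled" threshold used by rcu_gp_might_be_stalled()
// below, and RCU_STALL_MIGHT_MIN (two seconds) is the floor for that threshold.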
27 
28 int rcu_exp_jiffies_till_stall_check(void)
29 {
30 	int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
31 	int exp_stall_delay_delta = 0;
32 	int till_stall_check;
33 
34 	// Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
35 	if (!cpu_stall_timeout)
36 		cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());
37 
38 	// Limit check must be consistent with the Kconfig limits for
39 	// CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
40 	// The minimum clamped value is "2UL", because at least one full
41 	// tick has to be guaranteed.
42 	till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 300UL * HZ);
43 
44 	if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
45 		WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));
46 
47 #ifdef CONFIG_PROVE_RCU
48 	/* Add an extra ~25% of till_stall_check. */
49 	exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
50 #endif
51 
52 	return till_stall_check + exp_stall_delay_delta;
53 }
54 EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);
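// Illustrative arithmetic (assumed values, not defaults): with HZ=1000 and
// rcu_exp_cpu_stall_timeout set to 20 ms, till_stall_check clamps to 20
// jiffies, and under CONFIG_PROVE_RCU the extra 25% (plus one) brings the
// returned value to 26 jiffies.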
55 
56 /* Limit-check stall timeouts specified at boottime and runtime. */
57 int rcu_jiffies_till_stall_check(void)
58 {
59 	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
60 
61 	/*
62 	 * Limit check must be consistent with the Kconfig limits
63 	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
64 	 */
65 	if (till_stall_check < 3) {
66 		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
67 		till_stall_check = 3;
68 	} else if (till_stall_check > 300) {
69 		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
70 		till_stall_check = 300;
71 	}
72 	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
73 }
74 EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
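// For example, with the usual CONFIG_RCU_CPU_STALL_TIMEOUT default of 21
// seconds, this returns 21 * HZ jiffies, plus a 5 * HZ slack when
// CONFIG_PROVE_RCU is enabled.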
75 
76 /**
77  * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
78  *
79  * Returns true if the current grace period is sufficiently old that
80  * it is reasonable to assume that it might be stalled.  This can be
81  * useful when deciding whether to allocate memory to enable RCU-mediated
82  * freeing on the one hand or just invoking synchronize_rcu() on the other.
83  * The latter is preferable when the grace period is stalled.
84  *
85  * Note that sampling of the .gp_start and .gp_seq fields must be done
86  * carefully to avoid false positives at the beginnings and ends of
87  * grace periods.
88  */
89 bool rcu_gp_might_be_stalled(void)
90 {
91 	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
92 	unsigned long j = jiffies;
93 
94 	if (d < RCU_STALL_MIGHT_MIN)
95 		d = RCU_STALL_MIGHT_MIN;
96 	smp_mb(); // jiffies before .gp_seq to avoid false positives.
97 	if (!rcu_gp_in_progress())
98 		return false;
99 	// Long delays at this point avoid false positives, but a delay
100 	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
101 	smp_mb(); // .gp_seq before second .gp_start
102 	// And ditto here.
103 	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
104 }
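// A caller choosing between RCU-mediated (deferred) freeing and a direct
// synchronize_rcu() might, for example, take the synchronize_rcu() path when
// this returns true, since allocating more memory behind a possibly stalled
// grace period only delays reclamation further.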
105 
106 /* Don't do RCU CPU stall warnings during long sysrq printouts. */
107 void rcu_sysrq_start(void)
108 {
109 	if (!rcu_cpu_stall_suppress)
110 		rcu_cpu_stall_suppress = 2;
111 }
112 
113 void rcu_sysrq_end(void)
114 {
115 	if (rcu_cpu_stall_suppress == 2)
116 		rcu_cpu_stall_suppress = 0;
117 }
118 
119 /* Don't print RCU CPU stall warnings during a kernel panic. */
120 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
121 {
122 	rcu_cpu_stall_suppress = 1;
123 	return NOTIFY_DONE;
124 }
125 
126 static struct notifier_block rcu_panic_block = {
127 	.notifier_call = rcu_panic,
128 };
129 
130 static int __init check_cpu_stall_init(void)
131 {
132 	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
133 	return 0;
134 }
135 early_initcall(check_cpu_stall_init);
136 
137 /* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
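// For example, with sysctl_panic_on_rcu_stall set to 1 and
// sysctl_max_rcu_stall_to_panic set to 3, the first two stall reports are
// tolerated and the third one panics the system.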
138 static void panic_on_rcu_stall(void)
139 {
140 	static int cpu_stall;
141 
142 	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
143 		return;
144 
145 	if (sysctl_panic_on_rcu_stall)
146 		panic("RCU Stall\n");
147 }
148 
149 /**
150  * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
151  *
152  * To honor the caller's reset request, disable stall detection until
153  * three fqs-loop passes have completed, which ensures that a fresh
154  * jiffies value is loaded.  This is safe to do from the fqs loop, as
155  * enough timer interrupts and context switches will have passed by then.
156  *
157  * The caller must disable hard irqs.
158  */
159 void rcu_cpu_stall_reset(void)
160 {
161 	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
162 	WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
163 }
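// While ->nr_fqs_jiffies_stall remains nonzero, check_cpu_stall() below
// declines to report anything; the force-quiescent-state loop (in tree.c) is
// expected to count it down and re-arm ->jiffies_stall from a fresh jiffies
// value once it reaches zero.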
164 
165 //////////////////////////////////////////////////////////////////////////////
166 //
167 // Interaction with RCU grace periods
168 
169 /* Start of new grace period, so record stall time (and forcing times). */
170 static void record_gp_stall_check_time(void)
171 {
172 	unsigned long j = jiffies;
173 	unsigned long j1;
174 
175 	WRITE_ONCE(rcu_state.gp_start, j);
176 	j1 = rcu_jiffies_till_stall_check();
177 	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
178 	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
179 	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
180 	rcu_state.jiffies_resched = j + j1 / 2;
181 	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
182 }
183 
184 /* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
185 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
186 {
187 	rdp->ticks_this_gp = 0;
188 	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
189 	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
190 }
191 
192 /*
193  * If too much time has passed in the current grace period, and if
194  * so configured, go kick the relevant kthreads.
195  */
196 static void rcu_stall_kick_kthreads(void)
197 {
198 	unsigned long j;
199 
200 	if (!READ_ONCE(rcu_kick_kthreads))
201 		return;
202 	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
203 	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
204 	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
205 		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
206 			  rcu_state.name);
207 		rcu_ftrace_dump(DUMP_ALL);
208 		wake_up_process(rcu_state.gp_kthread);
209 		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
210 	}
211 }
212 
213 /*
214  * Handler for the irq_work request posted about halfway into the RCU CPU
215  * stall timeout, and used to detect excessive irq disabling.  Set state
216  * appropriately, but just complain if there is unexpected state on entry.
217  */
218 static void rcu_iw_handler(struct irq_work *iwp)
219 {
220 	struct rcu_data *rdp;
221 	struct rcu_node *rnp;
222 
223 	rdp = container_of(iwp, struct rcu_data, rcu_iw);
224 	rnp = rdp->mynode;
225 	raw_spin_lock_rcu_node(rnp);
226 	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
227 		rdp->rcu_iw_gp_seq = rnp->gp_seq;
228 		rdp->rcu_iw_pending = false;
229 	}
230 	raw_spin_unlock_rcu_node(rnp);
231 }
232 
233 //////////////////////////////////////////////////////////////////////////////
234 //
235 // Printing RCU CPU stall warnings
236 
237 #ifdef CONFIG_PREEMPT_RCU
238 
239 /*
240  * Dump detailed information for all tasks blocking the current RCU
241  * grace period on the specified rcu_node structure.
242  */
243 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
244 {
245 	unsigned long flags;
246 	struct task_struct *t;
247 
248 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
249 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
250 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
251 		return;
252 	}
253 	t = list_entry(rnp->gp_tasks->prev,
254 		       struct task_struct, rcu_node_entry);
255 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
256 		/*
257 		 * We could be printing a lot while holding a spinlock.
258 		 * Avoid triggering hard lockup.
259 		 */
260 		touch_nmi_watchdog();
261 		sched_show_task(t);
262 	}
263 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
264 }
265 
266 // Communicate task state back to the RCU CPU stall warning request.
267 struct rcu_stall_chk_rdr {
268 	int nesting;
269 	union rcu_special rs;
270 	bool on_blkd_list;
271 };
272 
273 /*
274  * Report out the state of a not-running task that is stalling the
275  * current RCU grace period.
276  */
277 static int check_slow_task(struct task_struct *t, void *arg)
278 {
279 	struct rcu_stall_chk_rdr *rscrp = arg;
280 
281 	if (task_curr(t))
282 		return -EBUSY; // It is running, so decline to inspect it.
283 	rscrp->nesting = t->rcu_read_lock_nesting;
284 	rscrp->rs = t->rcu_read_unlock_special;
285 	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
286 	return 0;
287 }
288 
289 /*
290  * Scan the current list of tasks blocked within RCU read-side critical
291  * sections, printing out the tid of each of the first few of them.
292  */
293 static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
294 	__releases(rnp->lock)
295 {
296 	int i = 0;
297 	int ndetected = 0;
298 	struct rcu_stall_chk_rdr rscr;
299 	struct task_struct *t;
300 	struct task_struct *ts[8];
301 
302 	lockdep_assert_irqs_disabled();
303 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
304 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
305 		return 0;
306 	}
307 	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
308 	       rnp->level, rnp->grplo, rnp->grphi);
309 	t = list_entry(rnp->gp_tasks->prev,
310 		       struct task_struct, rcu_node_entry);
311 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
312 		get_task_struct(t);
313 		ts[i++] = t;
314 		if (i >= ARRAY_SIZE(ts))
315 			break;
316 	}
317 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
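	/*
	 * The tasks were snapshotted above with references held so that the
	 * rcu_node lock could be dropped before the slower reporting below.
	 * The ".x"[cond] constructs index a two-character string literal,
	 * printing the flag character when the condition is true and '.'
	 * otherwise.
	 */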
318 	while (i) {
319 		t = ts[--i];
320 		if (task_call_func(t, check_slow_task, &rscr))
321 			pr_cont(" P%d", t->pid);
322 		else
323 			pr_cont(" P%d/%d:%c%c%c%c",
324 				t->pid, rscr.nesting,
325 				".b"[rscr.rs.b.blocked],
326 				".q"[rscr.rs.b.need_qs],
327 				".e"[rscr.rs.b.exp_hint],
328 				".l"[rscr.on_blkd_list]);
329 		lockdep_assert_irqs_disabled();
330 		put_task_struct(t);
331 		ndetected++;
332 	}
333 	pr_cont("\n");
334 	return ndetected;
335 }
336 
337 #else /* #ifdef CONFIG_PREEMPT_RCU */
338 
339 /*
340  * Because preemptible RCU does not exist, we never have to check for
341  * tasks blocked within RCU read-side critical sections.
342  */
343 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
344 {
345 }
346 
347 /*
348  * Because preemptible RCU does not exist, we never have to check for
349  * tasks blocked within RCU read-side critical sections.
350  */
351 static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
352 	__releases(rnp->lock)
353 {
354 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
355 	return 0;
356 }
357 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
358 
359 /*
360  * Dump stacks of all tasks running on stalled CPUs.  First try using
361  * NMIs, but fall back to manual remote stack tracing on architectures
362  * that don't support NMI-based stack dumps.  The NMI-triggered stack
363  * traces are more accurate because they are printed by the target CPU.
364  */
365 static void rcu_dump_cpu_stacks(void)
366 {
367 	int cpu;
368 	unsigned long flags;
369 	struct rcu_node *rnp;
370 
371 	rcu_for_each_leaf_node(rnp) {
372 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
373 		for_each_leaf_node_possible_cpu(rnp, cpu)
374 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
375 				if (cpu_is_offline(cpu))
376 					pr_err("Offline CPU %d blocking current GP.\n", cpu);
377 				else
378 					dump_cpu_task(cpu);
379 			}
380 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
381 	}
382 }
383 
384 static const char * const gp_state_names[] = {
385 	[RCU_GP_IDLE] = "RCU_GP_IDLE",
386 	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
387 	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
388 	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
389 	[RCU_GP_INIT] = "RCU_GP_INIT",
390 	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
391 	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
392 	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
393 	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
394 };
395 
396 /*
397  * Convert a ->gp_state value to a character string.
398  */
399 static const char *gp_state_getname(short gs)
400 {
401 	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
402 		return "???";
403 	return gp_state_names[gs];
404 }
405 
406 /* Is the RCU grace-period kthread being starved of CPU time? */
407 static bool rcu_is_gp_kthread_starving(unsigned long *jp)
408 {
409 	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);
410 
411 	if (jp)
412 		*jp = j;
413 	return j > 2 * HZ;
414 }
415 
416 static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
417 {
418 	int cpu;
419 	struct task_struct *rcuc;
420 	unsigned long j;
421 
422 	rcuc = rdp->rcu_cpu_kthread_task;
423 	if (!rcuc)
424 		return false;
425 
426 	cpu = task_cpu(rcuc);
427 	if (cpu_is_offline(cpu) || idle_cpu(cpu))
428 		return false;
429 
430 	j = jiffies - READ_ONCE(rdp->rcuc_activity);
431 
432 	if (jp)
433 		*jp = j;
434 	return j > 2 * HZ;
435 }
436 
437 static void print_cpu_stat_info(int cpu)
438 {
439 	struct rcu_snap_record rsr, *rsrp;
440 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
441 	struct kernel_cpustat *kcsp = &kcpustat_cpu(cpu);
442 
443 	if (!rcu_cpu_stall_cputime)
444 		return;
445 
446 	rsrp = &rdp->snap_record;
447 	if (rsrp->gp_seq != rdp->gp_seq)
448 		return;
449 
450 	rsr.cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
451 	rsr.cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
452 	rsr.cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
453 
454 	pr_err("\t         hardirqs   softirqs   csw/system\n");
455 	pr_err("\t number: %8ld %10d %12lld\n",
456 		kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs,
457 		kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs,
458 		nr_context_switches_cpu(cpu) - rsrp->nr_csw);
459 	pr_err("\tcputime: %8lld %10lld %12lld   ==> %d(ms)\n",
460 		div_u64(rsr.cputime_irq - rsrp->cputime_irq, NSEC_PER_MSEC),
461 		div_u64(rsr.cputime_softirq - rsrp->cputime_softirq, NSEC_PER_MSEC),
462 		div_u64(rsr.cputime_system - rsrp->cputime_system, NSEC_PER_MSEC),
463 		jiffies_to_msecs(jiffies - rsrp->jiffies));
464 }
465 
466 /*
467  * Print out diagnostic information for the specified stalled CPU.
468  *
469  * If the specified CPU is aware of the current RCU grace period, then
470  * print the number of scheduling clock interrupts the CPU has taken
471  * during the time that it has been aware.  Otherwise, print the number
472  * of RCU grace periods that this CPU is ignorant of, for example, "1"
473  * if the CPU was aware of the previous grace period.
474  *
475  * Also print out idle info.
476  */
477 static void print_cpu_stall_info(int cpu)
478 {
479 	unsigned long delta;
480 	bool falsepositive;
481 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
482 	char *ticks_title;
483 	unsigned long ticks_value;
484 	bool rcuc_starved;
485 	unsigned long j;
486 	char buf[32];
487 
488 	/*
489 	 * We could be printing a lot while holding a spinlock.  Avoid
490 	 * triggering hard lockup.
491 	 */
492 	touch_nmi_watchdog();
493 
494 	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
495 	if (ticks_value) {
496 		ticks_title = "GPs behind";
497 	} else {
498 		ticks_title = "ticks this GP";
499 		ticks_value = rdp->ticks_this_gp;
500 	}
501 	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
502 	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
503 			rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
504 	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
505 	if (rcuc_starved)
506 		sprintf(buf, " rcuc=%ld jiffies(starved)", j);
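	/*
	 * In the message below, 'O' means the CPU is offline, 'o' that its
	 * ->qsmaskinit bit is clear (the current grace period is not waiting
	 * on it), and 'N' that its ->qsmaskinitnext bit is clear; '.' marks
	 * the opposite in each position.  The fourth character reports on the
	 * rcu_iw irq_work: '?' if CONFIG_IRQ_WORK is disabled, a digit giving
	 * how many grace periods the irq_work has been pending (capped at 9),
	 * '!' if it last completed in an earlier grace period, or '.' if it
	 * completed in this one.  See Documentation/RCU/stallwarn.rst.
	 */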
507 	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
508 	       cpu,
509 	       "O."[!!cpu_online(cpu)],
510 	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
511 	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
512 	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
513 			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
514 				"!."[!delta],
515 	       ticks_value, ticks_title,
516 	       rcu_dynticks_snap(cpu) & 0xffff,
517 	       ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
518 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
519 	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
520 	       rcuc_starved ? buf : "",
521 	       falsepositive ? " (false positive?)" : "");
522 
523 	print_cpu_stat_info(cpu);
524 }
525 
526 /* Complain about starvation of grace-period kthread.  */
527 static void rcu_check_gp_kthread_starvation(void)
528 {
529 	int cpu;
530 	struct task_struct *gpk = rcu_state.gp_kthread;
531 	unsigned long j;
532 
533 	if (rcu_is_gp_kthread_starving(&j)) {
534 		cpu = gpk ? task_cpu(gpk) : -1;
535 		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
536 		       rcu_state.name, j,
537 		       (long)rcu_seq_current(&rcu_state.gp_seq),
538 		       data_race(READ_ONCE(rcu_state.gp_flags)),
539 		       gp_state_getname(rcu_state.gp_state),
540 		       data_race(READ_ONCE(rcu_state.gp_state)),
541 		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
542 		if (gpk) {
543 			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
544 			pr_err("RCU grace-period kthread stack dump:\n");
545 			sched_show_task(gpk);
546 			if (cpu >= 0) {
547 				if (cpu_is_offline(cpu)) {
548 					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
549 				} else  {
550 					pr_err("Stack dump where RCU GP kthread last ran:\n");
551 					dump_cpu_task(cpu);
552 				}
553 			}
554 			wake_up_process(gpk);
555 		}
556 	}
557 }
558 
559 /* Complain about missing wakeups from expired fqs wait timer */
560 static void rcu_check_gp_kthread_expired_fqs_timer(void)
561 {
562 	struct task_struct *gpk = rcu_state.gp_kthread;
563 	short gp_state;
564 	unsigned long jiffies_fqs;
565 	int cpu;
566 
567 	/*
568 	 * Order reads of .gp_state and .jiffies_force_qs.
569 	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
570 	 */
571 	gp_state = smp_load_acquire(&rcu_state.gp_state);
572 	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);
573 
574 	if (gp_state == RCU_GP_WAIT_FQS &&
575 	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
576 	    gpk && !READ_ONCE(gpk->on_rq)) {
577 		cpu = task_cpu(gpk);
578 		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
579 		       rcu_state.name, (jiffies - jiffies_fqs),
580 		       (long)rcu_seq_current(&rcu_state.gp_seq),
581 		       data_race(rcu_state.gp_flags),
582 		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
583 		       data_race(READ_ONCE(gpk->__state)));
584 		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
585 		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
586 	}
587 }
588 
589 static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
590 {
591 	int cpu;
592 	unsigned long flags;
593 	unsigned long gpa;
594 	unsigned long j;
595 	int ndetected = 0;
596 	struct rcu_node *rnp;
597 	long totqlen = 0;
598 
599 	lockdep_assert_irqs_disabled();
600 
601 	/* Kick and suppress, if so configured. */
602 	rcu_stall_kick_kthreads();
603 	if (rcu_stall_is_suppressed())
604 		return;
605 
606 	/*
607 	 * OK, time to rat on our buddy...
608 	 * See Documentation/RCU/stallwarn.rst for info on how to debug
609 	 * RCU CPU stall warnings.
610 	 */
611 	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
612 	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
613 	rcu_for_each_leaf_node(rnp) {
614 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
615 		if (rnp->qsmask != 0) {
616 			for_each_leaf_node_possible_cpu(rnp, cpu)
617 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
618 					print_cpu_stall_info(cpu);
619 					ndetected++;
620 				}
621 		}
622 		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
623 		lockdep_assert_irqs_disabled();
624 	}
625 
626 	for_each_possible_cpu(cpu)
627 		totqlen += rcu_get_n_cbs_cpu(cpu);
628 	pr_err("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
629 	       smp_processor_id(), (long)(jiffies - gps),
630 	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
631 	if (ndetected) {
632 		rcu_dump_cpu_stacks();
633 
634 		/* Complain about tasks blocking the grace period. */
635 		rcu_for_each_leaf_node(rnp)
636 			rcu_print_detail_task_stall_rnp(rnp);
637 	} else {
638 		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
639 			pr_err("INFO: Stall ended before state dump start\n");
640 		} else {
641 			j = jiffies;
642 			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
643 			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
644 			       rcu_state.name, j - gpa, j, gpa,
645 			       data_race(READ_ONCE(jiffies_till_next_fqs)),
646 			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
647 		}
648 	}
649 	/* Rewrite if needed in case of slow consoles. */
650 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
651 		WRITE_ONCE(rcu_state.jiffies_stall,
652 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
653 
654 	rcu_check_gp_kthread_expired_fqs_timer();
655 	rcu_check_gp_kthread_starvation();
656 
657 	panic_on_rcu_stall();
658 
659 	rcu_force_quiescent_state();  /* Kick them all. */
660 }
661 
662 static void print_cpu_stall(unsigned long gps)
663 {
664 	int cpu;
665 	unsigned long flags;
666 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
667 	struct rcu_node *rnp = rcu_get_root();
668 	long totqlen = 0;
669 
670 	lockdep_assert_irqs_disabled();
671 
672 	/* Kick and suppress, if so configured. */
673 	rcu_stall_kick_kthreads();
674 	if (rcu_stall_is_suppressed())
675 		return;
676 
677 	/*
678 	 * OK, time to rat on ourselves...
679 	 * See Documentation/RCU/stallwarn.rst for info on how to debug
680 	 * RCU CPU stall warnings.
681 	 */
682 	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
683 	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
684 	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
685 	print_cpu_stall_info(smp_processor_id());
686 	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
687 	for_each_possible_cpu(cpu)
688 		totqlen += rcu_get_n_cbs_cpu(cpu);
689 	pr_err("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
690 		jiffies - gps,
691 		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen, rcu_state.n_online_cpus);
692 
693 	rcu_check_gp_kthread_expired_fqs_timer();
694 	rcu_check_gp_kthread_starvation();
695 
696 	rcu_dump_cpu_stacks();
697 
698 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
699 	/* Rewrite if needed in case of slow consoles. */
700 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
701 		WRITE_ONCE(rcu_state.jiffies_stall,
702 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
703 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
704 
705 	panic_on_rcu_stall();
706 
707 	/*
708 	 * Attempt to revive the RCU machinery by forcing a context switch.
709 	 *
710 	 * A context switch would normally allow the RCU state machine to make
711 	 * progress and it could be we're stuck in kernel space without context
712 	 * switches for an entirely unreasonable amount of time.
713 	 */
714 	set_tsk_need_resched(current);
715 	set_preempt_need_resched();
716 }
717 
718 static void check_cpu_stall(struct rcu_data *rdp)
719 {
720 	bool didstall = false;
721 	unsigned long gs1;
722 	unsigned long gs2;
723 	unsigned long gps;
724 	unsigned long j;
725 	unsigned long jn;
726 	unsigned long js;
727 	struct rcu_node *rnp;
728 
729 	lockdep_assert_irqs_disabled();
730 	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
731 	    !rcu_gp_in_progress())
732 		return;
733 	rcu_stall_kick_kthreads();
734 
735 	/*
736 	 * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
737 	 * loop re-arm the stall timeout from a fresh jiffies value.  This is
738 	 * required to have a good jiffies value after coming out of long
739 	 * breaks in jiffies updates.  Not doing so can cause false positives.
740 	 */
741 	if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
742 		return;
743 
744 	j = jiffies;
745 
746 	/*
747 	 * Lots of memory barriers to reject false positives.
748 	 *
749 	 * The idea is to pick up rcu_state.gp_seq, then
750 	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
751 	 * another copy of rcu_state.gp_seq.  These values are updated in
752 	 * the opposite order with memory barriers (or equivalent) during
753 	 * grace-period initialization and cleanup.  Now, a false positive
754 	 * can occur if we get a new value of rcu_state.gp_start and an old
755 	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
756 	 * the only way that this can happen is if one grace period ends
757 	 * and another starts between these two fetches.  This is detected
758 	 * by comparing the second fetch of rcu_state.gp_seq with the
759 	 * previous fetch from rcu_state.gp_seq.
760 	 *
761 	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
762 	 * and rcu_state.gp_start suffice to forestall false positives.
763 	 */
764 	gs1 = READ_ONCE(rcu_state.gp_seq);
765 	smp_rmb(); /* Pick up ->gp_seq first... */
766 	js = READ_ONCE(rcu_state.jiffies_stall);
767 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
768 	gps = READ_ONCE(rcu_state.gp_start);
769 	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
770 	gs2 = READ_ONCE(rcu_state.gp_seq);
771 	if (gs1 != gs2 ||
772 	    ULONG_CMP_LT(j, js) ||
773 	    ULONG_CMP_GE(gps, js))
774 		return; /* No stall or GP completed since entering function. */
775 	rnp = rdp->mynode;
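	/*
	 * Push ->jiffies_stall far into the future so that, of the CPUs
	 * racing on the cmpxchg() calls below, only one gets to report this
	 * stall; the reporting CPU re-arms a normal timeout at the end of
	 * this function.
	 */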
776 	jn = jiffies + ULONG_MAX / 2;
777 	if (rcu_gp_in_progress() &&
778 	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
779 	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
780 
781 		/*
782 		 * If a virtual machine is stopped by the host it can look to
783 		 * the watchdog like an RCU stall. Check to see if the host
784 		 * stopped the vm.
785 		 */
786 		if (kvm_check_and_clear_guest_paused())
787 			return;
788 
789 		/* We haven't checked in, so go dump stack. */
790 		print_cpu_stall(gps);
791 		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
792 			rcu_ftrace_dump(DUMP_ALL);
793 		didstall = true;
794 
795 	} else if (rcu_gp_in_progress() &&
796 		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
797 		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
798 
799 		/*
800 		 * If a virtual machine is stopped by the host it can look to
801 		 * the watchdog like an RCU stall. Check to see if the host
802 		 * stopped the vm.
803 		 */
804 		if (kvm_check_and_clear_guest_paused())
805 			return;
806 
807 		/* They had a few time units to dump stack, so complain. */
808 		print_other_cpu_stall(gs2, gps);
809 		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
810 			rcu_ftrace_dump(DUMP_ALL);
811 		didstall = true;
812 	}
813 	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
814 		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
815 		WRITE_ONCE(rcu_state.jiffies_stall, jn);
816 	}
817 }
818 
819 //////////////////////////////////////////////////////////////////////////////
820 //
821 // RCU forward-progress mechanisms, including that of callback invocation.
822 
823 
824 /*
825  * Check to see if a failure to end RCU priority inversion was due to
826  * a CPU not passing through a quiescent state.  When this happens, there
827  * is nothing that RCU priority boosting can do to help, so we shouldn't
828  * count this as an RCU priority boosting failure.  A return of true says
829  * RCU priority boosting is to blame, and false says otherwise.  If false
830  * is returned, the first of the CPUs to blame is stored through cpup.
831  * If there was no CPU blocking the current grace period, but also nothing
832  * in need of being boosted, *cpup is set to -1.  This can happen in case
833  * of vCPU preemption while the last CPU is reporting its quiescent state,
834  * for example.
835  *
836  * If cpup is NULL, then a lockless quick check is carried out, suitable
837  * for high-rate usage.  On the other hand, if cpup is non-NULL, each
838  * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
839  */
840 bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
841 {
842 	bool atb = false;
843 	int cpu;
844 	unsigned long flags;
845 	struct rcu_node *rnp;
846 
847 	rcu_for_each_leaf_node(rnp) {
848 		if (!cpup) {
849 			if (data_race(READ_ONCE(rnp->qsmask))) {
850 				return false;
851 			} else {
852 				if (READ_ONCE(rnp->gp_tasks))
853 					atb = true;
854 				continue;
855 			}
856 		}
857 		*cpup = -1;
858 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
859 		if (rnp->gp_tasks)
860 			atb = true;
861 		if (!rnp->qsmask) {
862 			// No CPUs without quiescent states for this rnp.
863 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
864 			continue;
865 		}
866 		// Find the first holdout CPU.
867 		for_each_leaf_node_possible_cpu(rnp, cpu) {
868 			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
869 				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
870 				*cpup = cpu;
871 				return false;
872 			}
873 		}
874 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
875 	}
876 	// Can't blame CPUs, so must blame RCU priority boosting.
877 	return atb;
878 }
879 EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
880 
881 /*
882  * Show the state of the grace-period kthreads.
883  */
884 void show_rcu_gp_kthreads(void)
885 {
886 	unsigned long cbs = 0;
887 	int cpu;
888 	unsigned long j;
889 	unsigned long ja;
890 	unsigned long jr;
891 	unsigned long js;
892 	unsigned long jw;
893 	struct rcu_data *rdp;
894 	struct rcu_node *rnp;
895 	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
896 
897 	j = jiffies;
898 	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
899 	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
900 	js = j - data_race(READ_ONCE(rcu_state.gp_start));
901 	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
902 	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
903 		rcu_state.name, gp_state_getname(rcu_state.gp_state),
904 		data_race(READ_ONCE(rcu_state.gp_state)),
905 		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
906 		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
907 		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
908 		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
909 		data_race(READ_ONCE(rcu_state.gp_max)),
910 		data_race(READ_ONCE(rcu_state.gp_flags)));
911 	rcu_for_each_node_breadth_first(rnp) {
912 		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
913 		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
914 		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
915 			continue;
916 		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
917 			rnp->grplo, rnp->grphi,
918 			(long)data_race(READ_ONCE(rnp->gp_seq)),
919 			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
920 			data_race(READ_ONCE(rnp->qsmask)),
921 			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
922 			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
923 			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
924 			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
925 			data_race(READ_ONCE(rnp->n_boosts)));
926 		if (!rcu_is_leaf_node(rnp))
927 			continue;
928 		for_each_leaf_node_possible_cpu(rnp, cpu) {
929 			rdp = per_cpu_ptr(&rcu_data, cpu);
930 			if (READ_ONCE(rdp->gpwrap) ||
931 			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
932 					 READ_ONCE(rdp->gp_seq_needed)))
933 				continue;
934 			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
935 				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
936 		}
937 	}
938 	for_each_possible_cpu(cpu) {
939 		rdp = per_cpu_ptr(&rcu_data, cpu);
940 		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
941 		if (rcu_segcblist_is_offloaded(&rdp->cblist))
942 			show_rcu_nocb_state(rdp);
943 	}
944 	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
945 	show_rcu_tasks_gp_kthreads();
946 }
947 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
948 
949 /*
950  * This function checks for grace-period requests that fail to motivate
951  * RCU to come out of its idle mode.
952  */
953 static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
954 				     const unsigned long gpssdelay)
955 {
956 	unsigned long flags;
957 	unsigned long j;
958 	struct rcu_node *rnp_root = rcu_get_root();
959 	static atomic_t warned = ATOMIC_INIT(0);
960 
961 	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
962 	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
963 			 READ_ONCE(rnp_root->gp_seq_needed)) ||
964 	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
965 		return;
966 	j = jiffies; /* Expensive access, and in common case don't get here. */
967 	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
968 	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
969 	    atomic_read(&warned))
970 		return;
971 
972 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
973 	j = jiffies;
974 	if (rcu_gp_in_progress() ||
975 	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
976 			 READ_ONCE(rnp_root->gp_seq_needed)) ||
977 	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
978 	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
979 	    atomic_read(&warned)) {
980 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
981 		return;
982 	}
983 	/* Hold onto the leaf lock to make others see warned==1. */
984 
985 	if (rnp_root != rnp)
986 		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
987 	j = jiffies;
988 	if (rcu_gp_in_progress() ||
989 	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
990 			 READ_ONCE(rnp_root->gp_seq_needed)) ||
991 	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
992 	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
993 	    atomic_xchg(&warned, 1)) {
994 		if (rnp_root != rnp)
995 			/* irqs remain disabled. */
996 			raw_spin_unlock_rcu_node(rnp_root);
997 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
998 		return;
999 	}
1000 	WARN_ON(1);
1001 	if (rnp_root != rnp)
1002 		raw_spin_unlock_rcu_node(rnp_root);
1003 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1004 	show_rcu_gp_kthreads();
1005 }
1006 
1007 /*
1008  * Do a forward-progress check for rcutorture.  This is normally invoked
1009  * due to an OOM event.  The argument "j" gives the time period during
1010  * which rcutorture would like progress to have been made.
1011  */
1012 void rcu_fwd_progress_check(unsigned long j)
1013 {
1014 	unsigned long cbs;
1015 	int cpu;
1016 	unsigned long max_cbs = 0;
1017 	int max_cpu = -1;
1018 	struct rcu_data *rdp;
1019 
1020 	if (rcu_gp_in_progress()) {
1021 		pr_info("%s: GP age %lu jiffies\n",
1022 			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
1023 		show_rcu_gp_kthreads();
1024 	} else {
1025 		pr_info("%s: Last GP end %lu jiffies ago\n",
1026 			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
1027 		preempt_disable();
1028 		rdp = this_cpu_ptr(&rcu_data);
1029 		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
1030 		preempt_enable();
1031 	}
1032 	for_each_possible_cpu(cpu) {
1033 		cbs = rcu_get_n_cbs_cpu(cpu);
1034 		if (!cbs)
1035 			continue;
1036 		if (max_cpu < 0)
1037 			pr_info("%s: callbacks", __func__);
1038 		pr_cont(" %d: %lu", cpu, cbs);
1039 		if (cbs <= max_cbs)
1040 			continue;
1041 		max_cbs = cbs;
1042 		max_cpu = cpu;
1043 	}
1044 	if (max_cpu >= 0)
1045 		pr_cont("\n");
1046 }
1047 EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
1048 
1049 /* Commandeer a sysrq key to dump RCU's tree. */
1050 static bool sysrq_rcu;
1051 module_param(sysrq_rcu, bool, 0444);
1052 
1053 /* Dump grace-period-request information due to commandeered sysrq. */
1054 static void sysrq_show_rcu(u8 key)
1055 {
1056 	show_rcu_gp_kthreads();
1057 }
1058 
1059 static const struct sysrq_key_op sysrq_rcudump_op = {
1060 	.handler = sysrq_show_rcu,
1061 	.help_msg = "show-rcu(y)",
1062 	.action_msg = "Show RCU tree",
1063 	.enable_mask = SYSRQ_ENABLE_DUMP,
1064 };
1065 
1066 static int __init rcu_sysrq_init(void)
1067 {
1068 	if (sysrq_rcu)
1069 		return register_sysrq_key('y', &sysrq_rcudump_op);
1070 	return 0;
1071 }
1072 early_initcall(rcu_sysrq_init);
1073