xref: /openbmc/linux/kernel/watchdog.c (revision 5dc3055879b8f659f62abb7c3d1eaa4d02e36d65)
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * this code detects hard lockups: incidents where, on a CPU,
 * the kernel does not respond to anything except NMI.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

int watchdog_enabled;
int __read_mostly softlockup_thresh = 60;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

static int no_watchdog;


/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "0", 1))
		no_watchdog = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	no_watchdog = 1;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	no_watchdog = 1;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
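
/*
 * Example usage - a sketch of typical command lines, based only on the
 * __setup() handlers above:
 *
 *   nmi_watchdog=panic    panic instead of warning on a hard lockup
 *   nmi_watchdog=0        disable the watchdog entirely
 *   softlockup_panic=1    panic when a soft lockup is detected
 *   nowatchdog            disable both detectors
 *   nosoftlockup          deprecated; also disables everything here
 */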
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

static u64 get_sample_period(void)
{
	/*
	 * convert softlockup_thresh from seconds to ns
	 * the divide by 5 is to give the hrtimer 5 chances to
	 * increment before the hardlockup detector generates
	 * a warning
	 * (computed in 64 bits: thresh * NSEC_PER_SEC would
	 * overflow a 32-bit unsigned long)
	 */
	return (u64)softlockup_thresh * NSEC_PER_SEC / 5;
}
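
/*
 * Worked example: with the default softlockup_thresh of 60 seconds the
 * sample period is 60s / 5 = 12s, so the per-cpu hrtimer fires (and
 * hrtimer_interrupts advances) five times within one softlockup
 * threshold window.
 */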

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done locklessly
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
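
/*
 * Note: exported so that code which legitimately keeps a CPU busy with
 * interrupts disabled for a long stretch (long console printouts, for
 * example) can call touch_nmi_watchdog() to tell both detectors that
 * the CPU is not actually stuck.
 */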

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
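/*
 * A hard lockup is inferred when this CPU's hrtimer interrupt stops
 * running: the perf NMI fires after a fixed budget of CPU cycles, and
 * between two consecutive NMIs the hrtimer should have incremented
 * hrtimer_interrupts several times.  If the saved count is unchanged
 * since the last NMI, interrupts have been blocked for the whole
 * NMI period.
 */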
static int is_hardlockup(void)
{
	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);

	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
		return 1;

	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
	return 0;
}
#endif

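/*
 * A soft lockup is inferred from the per-cpu touch timestamp: the
 * watchdog kthread refreshes it whenever it gets to run, so if "now" is
 * more than softlockup_thresh seconds past the last touch, no task
 * switch has happened in that window and we report how long the CPU
 * has been stuck.
 */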
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + softlockup_thresh))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
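
/*
 * The counter is pinned to the CPU and programmed (in
 * watchdog_nmi_enable) with a sample_period of CPU cycles chosen by
 * hw_nmi_get_sample_period(), so its overflow NMI arrives at a roughly
 * fixed wall-clock interval even while interrupts are disabled.
 */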

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event, int nmi,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__get_cpu_var(watchdog_nmi_touch) == true) {
		__get_cpu_var(watchdog_nmi_touch) = false;
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflowed.  If it hasn't,
	 * then this is a good indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__get_cpu_var(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__get_cpu_var(hard_watchdog_warn) = true;
		return;
	}

	__get_cpu_var(hard_watchdog_warn) = false;
}
static void watchdog_interrupt_count(void)
{
	__get_cpu_var(hrtimer_interrupts)++;
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
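/*
 * watchdog_timer_fn() runs once per sample period from a per-cpu
 * hrtimer and feeds both detectors: it bumps hrtimer_interrupts
 * (proving to the NMI path that timer interrupts are alive) and wakes
 * the per-cpu watchdog kthread (which must get scheduled in order to
 * refresh the softlockup timestamp).
 */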
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__get_cpu_var(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__get_cpu_var(softlockup_touch_sync) = false;
			sched_clock_tick();
		}
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't, then
	 * this is a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/* only warn once */
		if (__get_cpu_var(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__get_cpu_var(soft_watchdog_warn) = true;
	} else
		__get_cpu_var(soft_watchdog_warn) = false;

	return HRTIMER_RESTART;
}


/*
 * The watchdog thread - touches the timestamp.
 */
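/*
 * Design note: the thread runs at the highest SCHED_FIFO priority, so
 * the only way it can fail to run (and fail to touch the timestamp) is
 * if the CPU never re-enters the scheduler at all - exactly the
 * condition the softlockup detector is meant to catch.
 */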
static int watchdog(void *unused)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly each time the watchdog timer wakes us to reset the
	 * softlockup timestamp.  If this gets delayed for more than
	 * softlockup_thresh seconds then the debug-printout triggers in
	 * watchdog_timer_fn().
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}


#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	/* Try to register using hardware perf events */
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period();
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
	if (!IS_ERR(event)) {
		printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
		goto out_save;
	}

	printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
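
/*
 * Note: on failure (no hardware PMU, or perf refusing the counter) the
 * PTR_ERR() value propagates out through watchdog_enable(), which then
 * skips creating the softlockup kthread for this cpu as well.
 */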

static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static int watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	return 0;
}

static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	int err;

	/* enable the perf event */
	err = watchdog_nmi_enable(cpu);
	if (err)
		return err;

	/* create the watchdog thread */
	if (!p) {
		p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
			return PTR_ERR(p);
		}
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}

	/* if any cpu succeeds, watchdog is considered enabled for the system */
	watchdog_enabled = 1;

	return 0;
}

static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}
}

static void watchdog_enable_all_cpus(void)
{
	int cpu;
	int result = 0;

	for_each_online_cpu(cpu)
		result += watchdog_enable(cpu);

	if (result)
		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
	int cpu;

	if (no_watchdog)
		return;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}


/* sysctl functions */
#ifdef CONFIG_SYSCTL
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */

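/*
 * Example usage - a sketch based on the handler below, which writes
 * the integer into watchdog_enabled and then acts on it:
 *
 *   echo 0 > /proc/sys/kernel/nmi_watchdog     tear the watchdog down
 *   echo 1 > /proc/sys/kernel/nmi_watchdog     (re)enable it on all
 *                                              online cpus
 */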
int proc_dowatchdog_enabled(struct ctl_table *table, int write,
		     void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();
	return 0;
}

int proc_dowatchdog_thresh(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *lenp, loff_t *ppos)
{
	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
#endif /* CONFIG_SYSCTL */


/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err = watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

void __init lockup_detector_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	if (no_watchdog)
		return;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
}
568