xref: /openbmc/linux/kernel/watchdog.c (revision 609e478b)
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static bool hardlockup_detector_enabled = true;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void watchdog_enable_hardlockup_detector(bool val)
{
	hardlockup_detector_enabled = val;
}

bool watchdog_hardlockup_detector_is_enabled(void)
{
	return hardlockup_detector_enabled;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1) || !strncmp(str, "2", 1)) {
		/*
		 * Setting 'nmi_watchdog=1' or 'nmi_watchdog=2' (legacy option)
		 * has the same effect.
		 */
		watchdog_user_enabled = 1;
		watchdog_enable_hardlockup_detector(true);
	}
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
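/*
 * Summary of the command-line values handled above (added for reference):
 * "nmi_watchdog=panic" / "nmi_watchdog=nopanic" select whether a detected
 * hard lockup panics the machine, "nmi_watchdog=0" disables the watchdog
 * entirely, and "nmi_watchdog=1" or "nmi_watchdog=2" (legacy spelling)
 * enable it together with the hardlockup detector.
 */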
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
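/*
 * Example (illustrative): booting with "softlockup_panic=1" makes the
 * detector call panic() when a soft lockup is reported below in
 * watchdog_timer_fn(), instead of only printing the warning.
 */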

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds; soft
 * lockups can produce false positives under extreme conditions, so they
 * deserve a higher threshold. The two thresholds are therefore coupled by a
 * fixed factor: the soft-lockup threshold is twice the hard-lockup threshold
 * (watchdog_thresh).
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 *
	 * The divide by 5 gives the hrtimer several chances (two or three,
	 * with the current ratio between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
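/*
 * Worked example (added for illustration): with the default
 * watchdog_thresh of 10 the softlockup threshold is 20 seconds, so
 * sample_period = 20 * (NSEC_PER_SEC / 5) = 4 * NSEC_PER_SEC, i.e. the
 * per-cpu hrtimer below fires every 4 seconds.
 */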

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
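/*
 * Illustrative (hypothetical) caller: code that legitimately keeps a CPU
 * busy for a long time calls touch_softlockup_watchdog() periodically so
 * the detector knows forward progress is still being made, e.g.:
 *
 *	while (!poll_device_done(dev))		// hypothetical helper
 *		touch_softlockup_watchdog();
 */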

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done locklessly.  Do we care if a 0 races with a
	 * timestamp?  All it means is that the softlockup check starts
	 * one cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
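/*
 * Illustrative (hypothetical) caller: code that deliberately runs for a
 * long time with interrupts disabled, such as a lengthy debug dump, can
 * call touch_nmi_watchdog() inside its loop to keep the hardlockup
 * detector (and, via the call above, the softlockup detector) quiet.
 */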

#endif

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
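/*
 * is_hardlockup() is called from the perf NMI below: if the per-cpu
 * hrtimer interrupt count has not advanced since the previous NMI, timer
 * interrupts are apparently not being serviced on this CPU, which is
 * treated as a hard lockup.
 */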
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
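/*
 * Worked example (illustrative): with the default watchdog_thresh of 10
 * the softlockup threshold is 20 s.  If the watchdog task last ran at
 * timestamp 100 and it is now 125, some task has hogged the CPU for 25 s
 * and is_softlockup() returns 25, which is the value printed in the
 * "stuck for Ns" message below.
 */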

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
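/*
 * The event above counts CPU cycles, is pinned to its CPU and starts out
 * disabled; watchdog_nmi_enable() below fills in .sample_period from
 * hw_nmi_get_sample_period(watchdog_thresh) and enables it, so the
 * overflow callback fires as an NMI roughly once per hard-lockup
 * threshold (the exact period is architecture specific).
 */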

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup.  This is done by making sure our timer
	 * interrupt is incrementing.  The timer interrupt should have fired
	 * multiple times before the counter overflowed.  If it hasn't, this
	 * is a good indication the CPU is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup.  This is done by making sure a high
	 * priority task is being scheduled.  The task touches the watchdog
	 * to indicate it is getting cpu time.  If it hasn't, this is a good
	 * indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup.  Check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating a second backtrace for the current
			 * CPU, since one was already produced above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

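/*
 * Called by the smpboot infrastructure before each iteration of the
 * per-cpu watchdog thread: the thread only needs to run when the hrtimer
 * above has fired since the thread's last pass, i.e. when
 * hrtimer_interrupts has moved past soft_lockup_hrtimer_cnt.
 */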
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/*
	 * Some kernels need to default hard lockup detection to
	 * 'disabled', for example a guest on a hypervisor.
	 */
	if (!watchdog_hardlockup_detector_is_enabled()) {
		event = ERR_PTR(-ENOENT);
		goto handle_err;
	}

	/* is it already set up and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is set up but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);

handle_err:
	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0, or when the outcome differs from cpu0's */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
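/*
 * Registering this descriptor (see watchdog_enable_all_cpus() below)
 * spawns one "watchdog/N" kthread per CPU.  The smpboot core invokes
 * .setup when a thread is first created, .park/.unpark when its CPU goes
 * offline/online and .cleanup when the thread is torn down, so the
 * hrtimer and perf event follow CPU hotplug automatically.
 */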

static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
	/*
	 * Make sure that the perf event counter will adapt to the new
	 * sampling period.  Updating the sampling period directly would be
	 * much nicer, but we do not have an API for that now, so let's use
	 * a big hammer.
	 * The hrtimer will pick up the new period on its next tick, but that
	 * might already be too late, so restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */

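/*
 * Illustrative usage from userspace (assuming the standard sysctl entries
 * wired up in kernel/sysctl.c):
 *
 *	echo 0 > /proc/sys/kernel/nmi_watchdog    # stop the watchdog threads
 *	echo 5 > /proc/sys/kernel/watchdog_thresh # 5 s hard / 10 s soft limit
 */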
int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	bool old_hardlockup;
	static DEFINE_MUTEX(watchdog_proc_mutex);

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
	old_hardlockup = watchdog_hardlockup_detector_is_enabled();

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * The watchdog threads should not be enabled twice, nor disabled
	 * when they are not running; the 'watchdog_running' check in the
	 * watchdog_*_all_cpus() functions takes care of this.
	 */
	if (watchdog_user_enabled && watchdog_thresh) {
		/*
		 * Prevent a change in watchdog_thresh accidentally overriding
		 * the enablement of the hardlockup detector.
		 */
		if (watchdog_user_enabled != old_enabled)
			watchdog_enable_hardlockup_detector(true);
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	} else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
		watchdog_enable_hardlockup_detector(old_hardlockup);
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}