xref: /openbmc/linux/kernel/watchdog.c (revision abe9af53)
// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif /* CONFIG_HARDLOCKUP_DETECTOR */
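
/*
 * For illustration, the parser above recognizes exactly four values on
 * the kernel command line:
 *
 *	nmi_watchdog=panic	hard lockups panic the machine
 *	nmi_watchdog=nopanic	hard lockups only warn
 *	nmi_watchdog=0		disable the hard lockup detector
 *	nmi_watchdog=1		enable the hard lockup detector
 */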

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop the
 * hardlockup detector when the softlockup watchdog is started and stopped.
 * The arch must select the SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }
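
/*
 * A minimal sketch of an architecture override for the weak hooks above
 * (hypothetical helpers, not part of this tree): the arch implementation
 * only has to quiesce and re-arm its NMI source around the variable
 * update, e.g.:
 *
 *	void watchdog_nmi_stop(void)
 *	{
 *		arch_nmi_source_quiesce();		(hypothetical)
 *	}
 *
 *	void watchdog_nmi_start(void)
 *	{
 *		arch_nmi_source_arm(watchdog_thresh);	(hypothetical)
 *	}
 */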

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
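
/*
 * Example: with the defaults above (watchdog_user_enabled = 1,
 * soft_watchdog_user_enabled = 1) and an available, user-enabled NMI
 * watchdog, this yields
 * watchdog_enabled == (NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED).
 * With watchdog_user_enabled == 0, both bits stay clear no matter what
 * the individual knobs say.
 */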

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

#define SOFTLOCKUP_RESET	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
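
/*
 * For illustration, the three boot parameters above compose on the
 * command line, e.g.:
 *
 *	nosoftlockup		keep only the hard lockup detector
 *	watchdog_thresh=20	hard threshold 20s, soft threshold 40s (2x)
 *	nowatchdog		disable both detectors entirely
 */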

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. We therefore
 * want a higher threshold for soft lockups than for hard lockups, and
 * couple the two with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
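
/*
 * Worked example: running_clock() counts nanoseconds, so a clock value
 * of 60 * 10^9 ns (one minute) shifts down to 55 "ticks" of ~1.074s
 * each - accurate enough for comparing against thresholds that are tens
 * of seconds.
 */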

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.  The divide by 5
	 * gives the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
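
/*
 * Worked example with the default watchdog_thresh = 10:
 * get_softlockup_thresh() = 20s, so sample_period = 20 * 10^9 / 5 ns,
 * i.e. the hrtimer fires every 4s - five times per soft threshold and
 * two to three times per 10s hard threshold, as the comment above says.
 */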

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
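
/*
 * Example: with watchdog_thresh = 10 the soft threshold is 20s. If the
 * stamp was last touched at t = 100s and now = 125s, time_after() fires
 * and the function returns 25 - the duration later printed as
 * "stuck for 25s!".
 */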

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
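
/*
 * The scheme: watchdog_timer_fn() bumps hrtimer_interrupts from hard
 * interrupt context every sample period. The NMI-based hardlockup check
 * calls is_hardlockup(); if the counter has not moved since the last
 * NMI sample, interrupts have been disabled for at least one full
 * period and a hard lockup is reported.
 */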

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	__touch_watchdog();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == SOFTLOCKUP_RESET) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup.  This is done by making sure a high
	 * priority task is being scheduled.  The task touches the watchdog
	 * to indicate it is getting cpu time.  If it hasn't, then this is a
	 * good indication that some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look
		 * to the watchdog like a soft lockup.  Check to see if the
		 * host stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
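
/*
 * One period of the kicker above, with default thresholds: every 4s the
 * hrtimer bumps the hardlockup counter and queues softlockup_fn() on
 * the per-CPU stopper task; if that highest priority task cannot run
 * and refresh watchdog_touch_ts for 20s, is_softlockup() returns the
 * stall duration and the report above fires.
 */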

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first, so that a large delay between
	 * disabling the timer and disabling the perf event cannot cause
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}

static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 *
 * Nothing runs yet as watchdog_allowed_mask is empty.  Once the
 * variables are set up, take the proper locks and start the watchdog
 * on the CPUs in watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_power_off() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}
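
/*
 * For illustration, from userspace these handlers map to:
 *
 *	echo 0 > /proc/sys/kernel/watchdog	  disable both detectors
 *	echo 1 > /proc/sys/kernel/soft_watchdog  softlockup detector on
 *	cat /proc/sys/kernel/nmi_watchdog	  racy snapshot, see above
 */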

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
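
/*
 * Example: "echo 0-3 > /proc/sys/kernel/watchdog_cpumask" restricts the
 * watchdog to CPUs 0-3. CPUs in the mask that are not yet online are
 * picked up later by lockup_detector_online_cpu().
 */
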
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}
754