158687acbSDon Zickus /* 258687acbSDon Zickus * Detect hard and soft lockups on a system 358687acbSDon Zickus * 458687acbSDon Zickus * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. 558687acbSDon Zickus * 686f5e6a7SFernando Luis Vázquez Cao * Note: Most of this code is borrowed heavily from the original softlockup 786f5e6a7SFernando Luis Vázquez Cao * detector, so thanks to Ingo for the initial implementation. 886f5e6a7SFernando Luis Vázquez Cao * Some chunks also taken from the old x86-specific nmi watchdog code, thanks 958687acbSDon Zickus * to those contributors as well. 1058687acbSDon Zickus */ 1158687acbSDon Zickus 124501980aSAndrew Morton #define pr_fmt(fmt) "NMI watchdog: " fmt 134501980aSAndrew Morton 1458687acbSDon Zickus #include <linux/mm.h> 1558687acbSDon Zickus #include <linux/cpu.h> 1658687acbSDon Zickus #include <linux/nmi.h> 1758687acbSDon Zickus #include <linux/init.h> 1858687acbSDon Zickus #include <linux/module.h> 1958687acbSDon Zickus #include <linux/sysctl.h> 20bcd951cfSThomas Gleixner #include <linux/smpboot.h> 218bd75c77SClark Williams #include <linux/sched/rt.h> 2258687acbSDon Zickus 2358687acbSDon Zickus #include <asm/irq_regs.h> 245d1c0f4aSEric B Munson #include <linux/kvm_para.h> 2558687acbSDon Zickus #include <linux/perf_event.h> 2658687acbSDon Zickus 2784d56e66SUlrich Obergfell /* 2884d56e66SUlrich Obergfell * The run state of the lockup detectors is controlled by the content of the 2984d56e66SUlrich Obergfell * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - 3084d56e66SUlrich Obergfell * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. 3184d56e66SUlrich Obergfell * 3284d56e66SUlrich Obergfell * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' 3384d56e66SUlrich Obergfell * are variables that are only used as an 'interface' between the parameters 3484d56e66SUlrich Obergfell * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. 
The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)

/*
 * Default run state: the soft lockup detector is always on; the hard
 * lockup detector bit is only set when the perf-based hardlockup
 * detector is built in.
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

/* /proc/sys/kernel interface variables (mirror bits of 'watchdog_enabled') */
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
/* hard lockup threshold in seconds; the soft threshold is derived (2x) */
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

/* non-zero once the per-cpu watchdog threads have been registered */
static int __read_mostly watchdog_running;
/* hrtimer sample period in ns, derived from the threshold (see set_sample_period()) */
static u64 __read_mostly sample_period;

/* per-cpu soft lockup state */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);	/* last touch, coarse seconds */
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* per-cpu hard lockup (perf NMI) state */
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
/* bit 0 set while some cpu is dumping all-cpu backtraces (see timer fn) */
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static bool hardlockup_detector_enabled = true;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void watchdog_enable_hardlockup_detector(bool val)
{
	hardlockup_detector_enabled = val;
}

bool watchdog_hardlockup_detector_is_enabled(void)
{
	return hardlockup_detector_enabled;
}

/*
 * Parse the 'nmi_watchdog=' boot parameter: 'panic'/'nopanic' control the
 * panic-on-hardlockup policy, '0' disables the watchdog entirely, and
 * '1'/'2' (legacy) enable it including hard lockup detection.
 */
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1) || !strncmp(str, "2", 1)) {
		/*
		 * Setting 'nmi_watchdog=1' or 'nmi_watchdog=2' (legacy option)
		 * has the same effect.
		 */
		watchdog_user_enabled = 1;
		watchdog_enable_hardlockup_detector(true);
	}
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

/* 'softlockup_panic=' boot parameter: non-zero means panic on soft lockup */
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

/* 'nowatchdog' boot parameter: disable the lockup detectors */
static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */
#ifdef CONFIG_SMP
/* 'softlockup_all_cpu_backtrace=' boot parameter: dump all cpus on lockup */
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

/* Recompute 'sample_period' from the current watchdog threshold. */
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}

/* Commands for resetting the watchdog */

/* Record "now" as this cpu's last-touched timestamp (must run on that cpu). */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/*
 * Defer the softlockup check by one cycle: a zero timestamp tells
 * watchdog_timer_fn() to re-initialize instead of checking.
 */
void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* Tell both detectors this cpu is alive; safe from NMI and irq context. */
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

/*
 * Like touch_softlockup_watchdog() but also requests a sched_clock_tick()
 * resync from the timer function before the timestamp is re-armed.
 */
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
/*
 * Called from the perf NMI: returns 1 if the hrtimer interrupt count has
 * not advanced since the previous NMI sample, i.e. this cpu looks stuck
 * with interrupts off.
 */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
/*
 * Returns how long (in coarse seconds) this cpu has exceeded the soft
 * lockup threshold, or 0 if it is within bounds.
 */
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

/* Template perf attr: a pinned, initially-disabled cycle counter per cpu. */
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem - runs in NMI context */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	/* a touch_nmi_watchdog() call asked us to skip this sample */
	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	/* re-arm the warning for the next real lockup */
	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* Proof-of-life counter sampled by the hard lockup detector. */
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
/*
 * Per-cpu hrtimer callback: feeds the hard lockup detector, wakes the
 * per-cpu watchdog thread, and reports a soft lockup if the thread has
 * not touched the timestamp within the threshold.
 */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	/* a zero timestamp means "touched": re-initialize, don't check */
	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

/* Set the scheduling class/priority of the current (watchdog) task. */
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

/* smpboot setup/unpark hook: runs on 'cpu' with this cpu's watchdog thread. */
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

/* smpboot park hook: undo watchdog_enable() on 'cpu'. */
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

/* smpboot cleanup hook (cpu going away). */
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

/* Run the thread only when the hrtimer has ticked since the last run. */
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

/*
 * Create and enable the perf NMI event for 'cpu'. Returns 0 on success
 * (or if hard lockup detection is administratively disabled), otherwise
 * the perf error code.
 */
static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/*
	 * Some kernels need to default hard lockup detection to
	 * 'disabled', for example a guest on a hypervisor.
	 */
	if (!watchdog_hardlockup_detector_is_enabled()) {
		event = ERR_PTR(-ENOENT);
		goto handle_err;
	}

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

handle_err:
	/* save cpu0 error for future comparision */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

/* Tear down the perf NMI event for 'cpu' (if any). */
static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* Per-cpu thread descriptor wiring the hooks above into the smpboot core. */
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

/* IPI callback: re-arm this cpu's hrtimer with the (new) sample_period. */
static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}

/* Propagate a changed sample period to both detectors on 'cpu'. */
static void update_timers(int cpu)
{
	/*
	 * Make sure that perf event counter will adopt to a new
	 * sampling period. Updating the sampling period directly would
	 * be much nicer but we do not have an API for that now so
	 * let's use a big hammer.
	 * Hrtimer will adopt the new period on the next tick but this
	 * might be late already so we have to restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

/* Apply update_timers() to every online cpu, holding off hotplug. */
static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	put_online_cpus();
}

/*
 * Start the watchdog threads on all cpus, or - if they already run -
 * push a changed sample period out to them.
 */
static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
/* Stop and unregister the watchdog threads on all cpus. */
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
694a0c9cbb9SUlrich Obergfell */ 695a0c9cbb9SUlrich Obergfell if (watchdog_enabled && watchdog_thresh) 696a0c9cbb9SUlrich Obergfell err = watchdog_enable_all_cpus(true); 697a0c9cbb9SUlrich Obergfell else 698a0c9cbb9SUlrich Obergfell watchdog_disable_all_cpus(); 699a0c9cbb9SUlrich Obergfell 700a0c9cbb9SUlrich Obergfell return err; 701a0c9cbb9SUlrich Obergfell 702a0c9cbb9SUlrich Obergfell } 703a0c9cbb9SUlrich Obergfell 704f54c2274SUlrich Obergfell static DEFINE_MUTEX(watchdog_proc_mutex); 705f54c2274SUlrich Obergfell 706a0c9cbb9SUlrich Obergfell /* 707ef246a21SUlrich Obergfell * common function for watchdog, nmi_watchdog and soft_watchdog parameter 708ef246a21SUlrich Obergfell * 709ef246a21SUlrich Obergfell * caller | table->data points to | 'which' contains the flag(s) 710ef246a21SUlrich Obergfell * -------------------|-----------------------|----------------------------- 711ef246a21SUlrich Obergfell * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed 712ef246a21SUlrich Obergfell * | | with SOFT_WATCHDOG_ENABLED 713ef246a21SUlrich Obergfell * -------------------|-----------------------|----------------------------- 714ef246a21SUlrich Obergfell * proc_nmi_watchdog | nmi_watchdog_enabled | NMI_WATCHDOG_ENABLED 715ef246a21SUlrich Obergfell * -------------------|-----------------------|----------------------------- 716ef246a21SUlrich Obergfell * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED 717ef246a21SUlrich Obergfell */ 718ef246a21SUlrich Obergfell static int proc_watchdog_common(int which, struct ctl_table *table, int write, 719ef246a21SUlrich Obergfell void __user *buffer, size_t *lenp, loff_t *ppos) 720ef246a21SUlrich Obergfell { 721ef246a21SUlrich Obergfell int err, old, new; 722ef246a21SUlrich Obergfell int *watchdog_param = (int *)table->data; 723ef246a21SUlrich Obergfell 724ef246a21SUlrich Obergfell mutex_lock(&watchdog_proc_mutex); 725ef246a21SUlrich Obergfell 726ef246a21SUlrich Obergfell /* 727ef246a21SUlrich Obergfell * 
If the parameter is being read return the state of the corresponding 728ef246a21SUlrich Obergfell * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the 729ef246a21SUlrich Obergfell * run state of the lockup detectors. 730ef246a21SUlrich Obergfell */ 731ef246a21SUlrich Obergfell if (!write) { 732ef246a21SUlrich Obergfell *watchdog_param = (watchdog_enabled & which) != 0; 733ef246a21SUlrich Obergfell err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 734ef246a21SUlrich Obergfell } else { 735ef246a21SUlrich Obergfell err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 736ef246a21SUlrich Obergfell if (err) 737ef246a21SUlrich Obergfell goto out; 738ef246a21SUlrich Obergfell 739ef246a21SUlrich Obergfell /* 740ef246a21SUlrich Obergfell * There is a race window between fetching the current value 741ef246a21SUlrich Obergfell * from 'watchdog_enabled' and storing the new value. During 742ef246a21SUlrich Obergfell * this race window, watchdog_nmi_enable() can sneak in and 743ef246a21SUlrich Obergfell * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'. 744ef246a21SUlrich Obergfell * The 'cmpxchg' detects this race and the loop retries. 745ef246a21SUlrich Obergfell */ 746ef246a21SUlrich Obergfell do { 747ef246a21SUlrich Obergfell old = watchdog_enabled; 748ef246a21SUlrich Obergfell /* 749ef246a21SUlrich Obergfell * If the parameter value is not zero set the 750ef246a21SUlrich Obergfell * corresponding bit(s), else clear it(them). 751ef246a21SUlrich Obergfell */ 752ef246a21SUlrich Obergfell if (*watchdog_param) 753ef246a21SUlrich Obergfell new = old | which; 754ef246a21SUlrich Obergfell else 755ef246a21SUlrich Obergfell new = old & ~which; 756ef246a21SUlrich Obergfell } while (cmpxchg(&watchdog_enabled, old, new) != old); 757ef246a21SUlrich Obergfell 758ef246a21SUlrich Obergfell /* 759ef246a21SUlrich Obergfell * Update the run state of the lockup detectors. 760ef246a21SUlrich Obergfell * Restore 'watchdog_enabled' on failure. 
761ef246a21SUlrich Obergfell */ 762ef246a21SUlrich Obergfell err = proc_watchdog_update(); 763ef246a21SUlrich Obergfell if (err) 764ef246a21SUlrich Obergfell watchdog_enabled = old; 765ef246a21SUlrich Obergfell } 766ef246a21SUlrich Obergfell out: 767ef246a21SUlrich Obergfell mutex_unlock(&watchdog_proc_mutex); 768ef246a21SUlrich Obergfell return err; 769ef246a21SUlrich Obergfell } 770ef246a21SUlrich Obergfell 771ef246a21SUlrich Obergfell /* 772586692a5SMandeep Singh Baines * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh 77358687acbSDon Zickus */ 77458687acbSDon Zickus 775586692a5SMandeep Singh Baines int proc_dowatchdog(struct ctl_table *table, int write, 776586692a5SMandeep Singh Baines void __user *buffer, size_t *lenp, loff_t *ppos) 77758687acbSDon Zickus { 778b8900bc0SFrederic Weisbecker int err, old_thresh, old_enabled; 7796e7458a6SUlrich Obergfell bool old_hardlockup; 78058687acbSDon Zickus 781359e6fabSMichal Hocko mutex_lock(&watchdog_proc_mutex); 782b8900bc0SFrederic Weisbecker old_thresh = ACCESS_ONCE(watchdog_thresh); 7833c00ea82SFrederic Weisbecker old_enabled = ACCESS_ONCE(watchdog_user_enabled); 7846e7458a6SUlrich Obergfell old_hardlockup = watchdog_hardlockup_detector_is_enabled(); 785bcd951cfSThomas Gleixner 786b8900bc0SFrederic Weisbecker err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 787b8900bc0SFrederic Weisbecker if (err || !write) 788359e6fabSMichal Hocko goto out; 789e04ab2bcSMandeep Singh Baines 7900f34c400SChuansheng Liu set_sample_period(); 791b66a2356Sanish kumar /* 792b66a2356Sanish kumar * Watchdog threads shouldn't be enabled if they are 7933c00ea82SFrederic Weisbecker * disabled. The 'watchdog_running' variable check in 794b66a2356Sanish kumar * watchdog_*_all_cpus() function takes care of this. 
795b66a2356Sanish kumar */ 7966e7458a6SUlrich Obergfell if (watchdog_user_enabled && watchdog_thresh) { 7976e7458a6SUlrich Obergfell /* 7986e7458a6SUlrich Obergfell * Prevent a change in watchdog_thresh accidentally overriding 7996e7458a6SUlrich Obergfell * the enablement of the hardlockup detector. 8006e7458a6SUlrich Obergfell */ 8016e7458a6SUlrich Obergfell if (watchdog_user_enabled != old_enabled) 8026e7458a6SUlrich Obergfell watchdog_enable_hardlockup_detector(true); 8039809b18fSMichal Hocko err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh); 8046e7458a6SUlrich Obergfell } else 80558687acbSDon Zickus watchdog_disable_all_cpus(); 806e04ab2bcSMandeep Singh Baines 807b8900bc0SFrederic Weisbecker /* Restore old values on failure */ 808b8900bc0SFrederic Weisbecker if (err) { 809b8900bc0SFrederic Weisbecker watchdog_thresh = old_thresh; 8103c00ea82SFrederic Weisbecker watchdog_user_enabled = old_enabled; 8116e7458a6SUlrich Obergfell watchdog_enable_hardlockup_detector(old_hardlockup); 812b8900bc0SFrederic Weisbecker } 813359e6fabSMichal Hocko out: 814359e6fabSMichal Hocko mutex_unlock(&watchdog_proc_mutex); 815b8900bc0SFrederic Weisbecker return err; 81658687acbSDon Zickus } 81758687acbSDon Zickus #endif /* CONFIG_SYSCTL */ 81858687acbSDon Zickus 819004417a6SPeter Zijlstra void __init lockup_detector_init(void) 82058687acbSDon Zickus { 8210f34c400SChuansheng Liu set_sample_period(); 822b8900bc0SFrederic Weisbecker 8233c00ea82SFrederic Weisbecker if (watchdog_user_enabled) 8249809b18fSMichal Hocko watchdog_enable_all_cpus(false); 82558687acbSDon Zickus } 826