/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)

/* Serializes the /proc/sys/kernel watchdog* handlers' state updates. */
static DEFINE_MUTEX(watchdog_proc_mutex);

/* Hard lockup detection defaults on only when the perf-based detector is built. */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif
/* CPUs on which the watchdog threads are allowed to run. */
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;

/* Softlockup hrtimer period in ns, derived in set_sample_period(). */
static u64 __read_mostly sample_period;

/* Per-CPU bookkeeping for the soft (and, below, hard) lockup detectors. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
/* bit 0: set while one CPU is dumping all-cpu backtraces (see timer fn). */
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

/* "nmi_watchdog=" boot option: panic/nopanic behavior, or 0/1 on-off. */
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

/* "softlockup_panic=" boot option: non-zero means panic on soft lockup. */
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

/* "nowatchdog" boot option: clears both enable bits, disabling everything. */
static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* "nosoftlockup" boot option: disables only the soft lockup detector. */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
/* "softlockup_all_cpu_backtrace=" boot option: dump all CPUs on soft lockup. */
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.
 * We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

/* Touch the watchdog and request a scheduler-clock resync on the next tick. */
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
/*
 * Hard lockup check: true when the hrtimer interrupt count has not advanced
 * since the previous NMI sample on this CPU.
 */
static bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
#endif

/*
 * Soft lockup check: returns the stall duration in seconds when 'touch_ts'
 * is older than the soft threshold, otherwise 0.
 */
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if (watchdog_enabled & SOFT_WATCHDOG_ENABLED) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
35758687acbSDon Zickus 35858687acbSDon Zickus /* kick the hardlockup detector */ 35958687acbSDon Zickus watchdog_interrupt_count(); 36058687acbSDon Zickus 36158687acbSDon Zickus /* kick the softlockup detector */ 362909ea964SChristoph Lameter wake_up_process(__this_cpu_read(softlockup_watchdog)); 36358687acbSDon Zickus 36458687acbSDon Zickus /* .. and repeat */ 3650f34c400SChuansheng Liu hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period)); 36658687acbSDon Zickus 36758687acbSDon Zickus if (touch_ts == 0) { 368909ea964SChristoph Lameter if (unlikely(__this_cpu_read(softlockup_touch_sync))) { 36958687acbSDon Zickus /* 37058687acbSDon Zickus * If the time stamp was touched atomically 37158687acbSDon Zickus * make sure the scheduler tick is up to date. 37258687acbSDon Zickus */ 373909ea964SChristoph Lameter __this_cpu_write(softlockup_touch_sync, false); 37458687acbSDon Zickus sched_clock_tick(); 37558687acbSDon Zickus } 3765d1c0f4aSEric B Munson 3775d1c0f4aSEric B Munson /* Clear the guest paused flag on watchdog reset */ 3785d1c0f4aSEric B Munson kvm_check_and_clear_guest_paused(); 37958687acbSDon Zickus __touch_watchdog(); 38058687acbSDon Zickus return HRTIMER_RESTART; 38158687acbSDon Zickus } 38258687acbSDon Zickus 38358687acbSDon Zickus /* check for a softlockup 38458687acbSDon Zickus * This is done by making sure a high priority task is 38558687acbSDon Zickus * being scheduled. The task touches the watchdog to 38658687acbSDon Zickus * indicate it is getting cpu time. 
If it hasn't then 38758687acbSDon Zickus * this is a good indication some task is hogging the cpu 38858687acbSDon Zickus */ 38926e09c6eSDon Zickus duration = is_softlockup(touch_ts); 39058687acbSDon Zickus if (unlikely(duration)) { 3915d1c0f4aSEric B Munson /* 3925d1c0f4aSEric B Munson * If a virtual machine is stopped by the host it can look to 3935d1c0f4aSEric B Munson * the watchdog like a soft lockup, check to see if the host 3945d1c0f4aSEric B Munson * stopped the vm before we issue the warning 3955d1c0f4aSEric B Munson */ 3965d1c0f4aSEric B Munson if (kvm_check_and_clear_guest_paused()) 3975d1c0f4aSEric B Munson return HRTIMER_RESTART; 3985d1c0f4aSEric B Munson 39958687acbSDon Zickus /* only warn once */ 400b1a8de1fSchai wen if (__this_cpu_read(soft_watchdog_warn) == true) { 401b1a8de1fSchai wen /* 402b1a8de1fSchai wen * When multiple processes are causing softlockups the 403b1a8de1fSchai wen * softlockup detector only warns on the first one 404b1a8de1fSchai wen * because the code relies on a full quiet cycle to 405b1a8de1fSchai wen * re-arm. The second process prevents the quiet cycle 406b1a8de1fSchai wen * and never gets reported. Use task pointers to detect 407b1a8de1fSchai wen * this. 408b1a8de1fSchai wen */ 409b1a8de1fSchai wen if (__this_cpu_read(softlockup_task_ptr_saved) != 410b1a8de1fSchai wen current) { 411b1a8de1fSchai wen __this_cpu_write(soft_watchdog_warn, false); 412b1a8de1fSchai wen __touch_watchdog(); 413b1a8de1fSchai wen } 41458687acbSDon Zickus return HRTIMER_RESTART; 415b1a8de1fSchai wen } 41658687acbSDon Zickus 417ed235875SAaron Tomlin if (softlockup_all_cpu_backtrace) { 418ed235875SAaron Tomlin /* Prevent multiple soft-lockup reports if one cpu is already 419ed235875SAaron Tomlin * engaged in dumping cpu back traces 420ed235875SAaron Tomlin */ 421ed235875SAaron Tomlin if (test_and_set_bit(0, &soft_lockup_nmi_warn)) { 422ed235875SAaron Tomlin /* Someone else will report us. 
Let's give up */ 423ed235875SAaron Tomlin __this_cpu_write(soft_watchdog_warn, true); 424ed235875SAaron Tomlin return HRTIMER_RESTART; 425ed235875SAaron Tomlin } 426ed235875SAaron Tomlin } 427ed235875SAaron Tomlin 428656c3b79SFabian Frederick pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 42926e09c6eSDon Zickus smp_processor_id(), duration, 43058687acbSDon Zickus current->comm, task_pid_nr(current)); 431b1a8de1fSchai wen __this_cpu_write(softlockup_task_ptr_saved, current); 43258687acbSDon Zickus print_modules(); 43358687acbSDon Zickus print_irqtrace_events(current); 43458687acbSDon Zickus if (regs) 43558687acbSDon Zickus show_regs(regs); 43658687acbSDon Zickus else 43758687acbSDon Zickus dump_stack(); 43858687acbSDon Zickus 439ed235875SAaron Tomlin if (softlockup_all_cpu_backtrace) { 440ed235875SAaron Tomlin /* Avoid generating two back traces for current 441ed235875SAaron Tomlin * given that one is already made above 442ed235875SAaron Tomlin */ 443ed235875SAaron Tomlin trigger_allbutself_cpu_backtrace(); 444ed235875SAaron Tomlin 445ed235875SAaron Tomlin clear_bit(0, &soft_lockup_nmi_warn); 446ed235875SAaron Tomlin /* Barrier to sync with other cpus */ 447ed235875SAaron Tomlin smp_mb__after_atomic(); 448ed235875SAaron Tomlin } 449ed235875SAaron Tomlin 45069361eefSJosh Hunt add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); 45158687acbSDon Zickus if (softlockup_panic) 45258687acbSDon Zickus panic("softlockup: hung tasks"); 453909ea964SChristoph Lameter __this_cpu_write(soft_watchdog_warn, true); 45458687acbSDon Zickus } else 455909ea964SChristoph Lameter __this_cpu_write(soft_watchdog_warn, false); 45658687acbSDon Zickus 45758687acbSDon Zickus return HRTIMER_RESTART; 45858687acbSDon Zickus } 45958687acbSDon Zickus 460bcd951cfSThomas Gleixner static void watchdog_set_prio(unsigned int policy, unsigned int prio) 46158687acbSDon Zickus { 462bcd951cfSThomas Gleixner struct sched_param param = { .sched_priority = prio }; 463bcd951cfSThomas Gleixner 
464bcd951cfSThomas Gleixner sched_setscheduler(current, policy, ¶m); 465bcd951cfSThomas Gleixner } 466bcd951cfSThomas Gleixner 467bcd951cfSThomas Gleixner static void watchdog_enable(unsigned int cpu) 468bcd951cfSThomas Gleixner { 469f7f66b05SChristoph Lameter struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); 47058687acbSDon Zickus 4713935e895SBjørn Mork /* kick off the timer for the hardlockup detector */ 4723935e895SBjørn Mork hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4733935e895SBjørn Mork hrtimer->function = watchdog_timer_fn; 4743935e895SBjørn Mork 475bcd951cfSThomas Gleixner /* Enable the perf event */ 476bcd951cfSThomas Gleixner watchdog_nmi_enable(cpu); 47758687acbSDon Zickus 47858687acbSDon Zickus /* done here because hrtimer_start can only pin to smp_processor_id() */ 4790f34c400SChuansheng Liu hrtimer_start(hrtimer, ns_to_ktime(sample_period), 48058687acbSDon Zickus HRTIMER_MODE_REL_PINNED); 48158687acbSDon Zickus 482bcd951cfSThomas Gleixner /* initialize timestamp */ 483bcd951cfSThomas Gleixner watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); 48458687acbSDon Zickus __touch_watchdog(); 48558687acbSDon Zickus } 486bcd951cfSThomas Gleixner 487bcd951cfSThomas Gleixner static void watchdog_disable(unsigned int cpu) 488bcd951cfSThomas Gleixner { 489f7f66b05SChristoph Lameter struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); 490bcd951cfSThomas Gleixner 491bcd951cfSThomas Gleixner watchdog_set_prio(SCHED_NORMAL, 0); 492bcd951cfSThomas Gleixner hrtimer_cancel(hrtimer); 493bcd951cfSThomas Gleixner /* disable the perf event */ 494bcd951cfSThomas Gleixner watchdog_nmi_disable(cpu); 495bcd951cfSThomas Gleixner } 496bcd951cfSThomas Gleixner 497b8900bc0SFrederic Weisbecker static void watchdog_cleanup(unsigned int cpu, bool online) 498b8900bc0SFrederic Weisbecker { 499b8900bc0SFrederic Weisbecker watchdog_disable(cpu); 500b8900bc0SFrederic Weisbecker } 501b8900bc0SFrederic Weisbecker 502bcd951cfSThomas Gleixner static int 
watchdog_should_run(unsigned int cpu) 503bcd951cfSThomas Gleixner { 504bcd951cfSThomas Gleixner return __this_cpu_read(hrtimer_interrupts) != 505bcd951cfSThomas Gleixner __this_cpu_read(soft_lockup_hrtimer_cnt); 506bcd951cfSThomas Gleixner } 507bcd951cfSThomas Gleixner 508b60f796cSAndrew Morton /* 509bcd951cfSThomas Gleixner * The watchdog thread function - touches the timestamp. 510bcd951cfSThomas Gleixner * 5110f34c400SChuansheng Liu * It only runs once every sample_period seconds (4 seconds by 512bcd951cfSThomas Gleixner * default) to reset the softlockup timestamp. If this gets delayed 513bcd951cfSThomas Gleixner * for more than 2*watchdog_thresh seconds then the debug-printout 514bcd951cfSThomas Gleixner * triggers in watchdog_timer_fn(). 515b60f796cSAndrew Morton */ 516bcd951cfSThomas Gleixner static void watchdog(unsigned int cpu) 517bcd951cfSThomas Gleixner { 518bcd951cfSThomas Gleixner __this_cpu_write(soft_lockup_hrtimer_cnt, 519bcd951cfSThomas Gleixner __this_cpu_read(hrtimer_interrupts)); 520bcd951cfSThomas Gleixner __touch_watchdog(); 521bcfba4f4SUlrich Obergfell 522bcfba4f4SUlrich Obergfell /* 523bcfba4f4SUlrich Obergfell * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the 524bcfba4f4SUlrich Obergfell * failure path. Check for failures that can occur asynchronously - 525bcfba4f4SUlrich Obergfell * for example, when CPUs are on-lined - and shut down the hardware 526bcfba4f4SUlrich Obergfell * perf event on each CPU accordingly. 527bcfba4f4SUlrich Obergfell * 528bcfba4f4SUlrich Obergfell * The only non-obvious place this bit can be cleared is through 529bcfba4f4SUlrich Obergfell * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a 530bcfba4f4SUlrich Obergfell * pr_info here would be too noisy as it would result in a message 531bcfba4f4SUlrich Obergfell * every few seconds if the hardlockup was disabled but the softlockup 532bcfba4f4SUlrich Obergfell * enabled. 
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

/*
 * Set up (and enable) the per-CPU hardlockup perf event.  Returns 0 on
 * success or when the hard lockup detector is disabled; otherwise the
 * perf_event_create_kernel_counter() error code.
 */
static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * the hardware perf event. The watchdog() function checks
	 * the NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

/* Disable and release this CPU's hardlockup perf event, if any. */
static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in
cleanup, but blocks oprofile */ 62858687acbSDon Zickus perf_event_release_kernel(event); 62958687acbSDon Zickus } 630df577149SUlrich Obergfell if (cpu == 0) { 631df577149SUlrich Obergfell /* watchdog_nmi_enable() expects this to be zero initially. */ 632df577149SUlrich Obergfell cpu0_err = 0; 633df577149SUlrich Obergfell } 63458687acbSDon Zickus } 635b3738d29SStephane Eranian 63658687acbSDon Zickus #else 637bcd951cfSThomas Gleixner static int watchdog_nmi_enable(unsigned int cpu) { return 0; } 638bcd951cfSThomas Gleixner static void watchdog_nmi_disable(unsigned int cpu) { return; } 63923637d47SFrederic Weisbecker #endif /* CONFIG_HARDLOCKUP_DETECTOR */ 64058687acbSDon Zickus 641b8900bc0SFrederic Weisbecker static struct smp_hotplug_thread watchdog_threads = { 642b8900bc0SFrederic Weisbecker .store = &softlockup_watchdog, 643b8900bc0SFrederic Weisbecker .thread_should_run = watchdog_should_run, 644b8900bc0SFrederic Weisbecker .thread_fn = watchdog, 645b8900bc0SFrederic Weisbecker .thread_comm = "watchdog/%u", 646b8900bc0SFrederic Weisbecker .setup = watchdog_enable, 647b8900bc0SFrederic Weisbecker .cleanup = watchdog_cleanup, 648b8900bc0SFrederic Weisbecker .park = watchdog_disable, 649b8900bc0SFrederic Weisbecker .unpark = watchdog_enable, 650b8900bc0SFrederic Weisbecker }; 651b8900bc0SFrederic Weisbecker 65281a4beefSUlrich Obergfell /* 65381a4beefSUlrich Obergfell * park all watchdog threads that are specified in 'watchdog_cpumask' 65481a4beefSUlrich Obergfell */ 65581a4beefSUlrich Obergfell static int watchdog_park_threads(void) 65681a4beefSUlrich Obergfell { 65781a4beefSUlrich Obergfell int cpu, ret = 0; 65881a4beefSUlrich Obergfell 65981a4beefSUlrich Obergfell get_online_cpus(); 66081a4beefSUlrich Obergfell for_each_watchdog_cpu(cpu) { 66181a4beefSUlrich Obergfell ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); 66281a4beefSUlrich Obergfell if (ret) 66381a4beefSUlrich Obergfell break; 66481a4beefSUlrich Obergfell } 66581a4beefSUlrich Obergfell if (ret) { 
66681a4beefSUlrich Obergfell for_each_watchdog_cpu(cpu) 66781a4beefSUlrich Obergfell kthread_unpark(per_cpu(softlockup_watchdog, cpu)); 66881a4beefSUlrich Obergfell } 66981a4beefSUlrich Obergfell put_online_cpus(); 67081a4beefSUlrich Obergfell 67181a4beefSUlrich Obergfell return ret; 67281a4beefSUlrich Obergfell } 67381a4beefSUlrich Obergfell 67481a4beefSUlrich Obergfell /* 67581a4beefSUlrich Obergfell * unpark all watchdog threads that are specified in 'watchdog_cpumask' 67681a4beefSUlrich Obergfell */ 67781a4beefSUlrich Obergfell static void watchdog_unpark_threads(void) 67881a4beefSUlrich Obergfell { 67981a4beefSUlrich Obergfell int cpu; 68081a4beefSUlrich Obergfell 68181a4beefSUlrich Obergfell get_online_cpus(); 68281a4beefSUlrich Obergfell for_each_watchdog_cpu(cpu) 68381a4beefSUlrich Obergfell kthread_unpark(per_cpu(softlockup_watchdog, cpu)); 68481a4beefSUlrich Obergfell put_online_cpus(); 68581a4beefSUlrich Obergfell } 68681a4beefSUlrich Obergfell 6878c073d27SUlrich Obergfell /* 6888c073d27SUlrich Obergfell * Suspend the hard and soft lockup detector by parking the watchdog threads. 6898c073d27SUlrich Obergfell */ 690ec6a9066SUlrich Obergfell int lockup_detector_suspend(void) 6918c073d27SUlrich Obergfell { 6928c073d27SUlrich Obergfell int ret = 0; 6938c073d27SUlrich Obergfell 6948c073d27SUlrich Obergfell mutex_lock(&watchdog_proc_mutex); 6958c073d27SUlrich Obergfell /* 6968c073d27SUlrich Obergfell * Multiple suspend requests can be active in parallel (counted by 6978c073d27SUlrich Obergfell * the 'watchdog_suspended' variable). If the watchdog threads are 6988c073d27SUlrich Obergfell * running, the first caller takes care that they will be parked. 6998c073d27SUlrich Obergfell * The state of 'watchdog_running' cannot change while a suspend 700ec6a9066SUlrich Obergfell * request is active (see related code in 'proc' handlers). 
7018c073d27SUlrich Obergfell */ 7028c073d27SUlrich Obergfell if (watchdog_running && !watchdog_suspended) 7038c073d27SUlrich Obergfell ret = watchdog_park_threads(); 7048c073d27SUlrich Obergfell 7058c073d27SUlrich Obergfell if (ret == 0) 7068c073d27SUlrich Obergfell watchdog_suspended++; 7078c073d27SUlrich Obergfell 7088c073d27SUlrich Obergfell mutex_unlock(&watchdog_proc_mutex); 7098c073d27SUlrich Obergfell 7108c073d27SUlrich Obergfell return ret; 7118c073d27SUlrich Obergfell } 7128c073d27SUlrich Obergfell 7138c073d27SUlrich Obergfell /* 7148c073d27SUlrich Obergfell * Resume the hard and soft lockup detector by unparking the watchdog threads. 7158c073d27SUlrich Obergfell */ 716ec6a9066SUlrich Obergfell void lockup_detector_resume(void) 7178c073d27SUlrich Obergfell { 7188c073d27SUlrich Obergfell mutex_lock(&watchdog_proc_mutex); 7198c073d27SUlrich Obergfell 7208c073d27SUlrich Obergfell watchdog_suspended--; 7218c073d27SUlrich Obergfell /* 7228c073d27SUlrich Obergfell * The watchdog threads are unparked if they were previously running 7238c073d27SUlrich Obergfell * and if there is no more active suspend request. 
7248c073d27SUlrich Obergfell */ 7258c073d27SUlrich Obergfell if (watchdog_running && !watchdog_suspended) 7268c073d27SUlrich Obergfell watchdog_unpark_threads(); 7278c073d27SUlrich Obergfell 7288c073d27SUlrich Obergfell mutex_unlock(&watchdog_proc_mutex); 7298c073d27SUlrich Obergfell } 7308c073d27SUlrich Obergfell 731b2f57c3aSUlrich Obergfell static void update_watchdog_all_cpus(void) 7329809b18fSMichal Hocko { 733d4bdd0b2SUlrich Obergfell watchdog_park_threads(); 734d4bdd0b2SUlrich Obergfell watchdog_unpark_threads(); 7359809b18fSMichal Hocko } 7369809b18fSMichal Hocko 737b2f57c3aSUlrich Obergfell static int watchdog_enable_all_cpus(void) 738b8900bc0SFrederic Weisbecker { 739b8900bc0SFrederic Weisbecker int err = 0; 740b8900bc0SFrederic Weisbecker 7413c00ea82SFrederic Weisbecker if (!watchdog_running) { 742230ec939SFrederic Weisbecker err = smpboot_register_percpu_thread_cpumask(&watchdog_threads, 743230ec939SFrederic Weisbecker &watchdog_cpumask); 744b8900bc0SFrederic Weisbecker if (err) 745b8900bc0SFrederic Weisbecker pr_err("Failed to create watchdog threads, disabled\n"); 746230ec939SFrederic Weisbecker else 7473c00ea82SFrederic Weisbecker watchdog_running = 1; 748b2f57c3aSUlrich Obergfell } else { 749b2f57c3aSUlrich Obergfell /* 750b2f57c3aSUlrich Obergfell * Enable/disable the lockup detectors or 751b2f57c3aSUlrich Obergfell * change the sample period 'on the fly'. 
752b2f57c3aSUlrich Obergfell */ 753b2f57c3aSUlrich Obergfell update_watchdog_all_cpus(); 754b8900bc0SFrederic Weisbecker } 755b8900bc0SFrederic Weisbecker 756b8900bc0SFrederic Weisbecker return err; 757b8900bc0SFrederic Weisbecker } 758b8900bc0SFrederic Weisbecker 75958687acbSDon Zickus /* prepare/enable/disable routines */ 7604ff81951SVasily Averin /* sysctl functions */ 7614ff81951SVasily Averin #ifdef CONFIG_SYSCTL 76258687acbSDon Zickus static void watchdog_disable_all_cpus(void) 76358687acbSDon Zickus { 7643c00ea82SFrederic Weisbecker if (watchdog_running) { 7653c00ea82SFrederic Weisbecker watchdog_running = 0; 766b8900bc0SFrederic Weisbecker smpboot_unregister_percpu_thread(&watchdog_threads); 76758687acbSDon Zickus } 768bcd951cfSThomas Gleixner } 76958687acbSDon Zickus 77058687acbSDon Zickus /* 771a0c9cbb9SUlrich Obergfell * Update the run state of the lockup detectors. 77258687acbSDon Zickus */ 773a0c9cbb9SUlrich Obergfell static int proc_watchdog_update(void) 77458687acbSDon Zickus { 775a0c9cbb9SUlrich Obergfell int err = 0; 776a0c9cbb9SUlrich Obergfell 777a0c9cbb9SUlrich Obergfell /* 778a0c9cbb9SUlrich Obergfell * Watchdog threads won't be started if they are already active. 779a0c9cbb9SUlrich Obergfell * The 'watchdog_running' variable in watchdog_*_all_cpus() takes 780a0c9cbb9SUlrich Obergfell * care of this. If those threads are already active, the sample 781a0c9cbb9SUlrich Obergfell * period will be updated and the lockup detectors will be enabled 782a0c9cbb9SUlrich Obergfell * or disabled 'on the fly'. 
783a0c9cbb9SUlrich Obergfell */ 784a0c9cbb9SUlrich Obergfell if (watchdog_enabled && watchdog_thresh) 785b2f57c3aSUlrich Obergfell err = watchdog_enable_all_cpus(); 786a0c9cbb9SUlrich Obergfell else 787a0c9cbb9SUlrich Obergfell watchdog_disable_all_cpus(); 788a0c9cbb9SUlrich Obergfell 789a0c9cbb9SUlrich Obergfell return err; 790a0c9cbb9SUlrich Obergfell 791a0c9cbb9SUlrich Obergfell } 792a0c9cbb9SUlrich Obergfell 793a0c9cbb9SUlrich Obergfell /* 794ef246a21SUlrich Obergfell * common function for watchdog, nmi_watchdog and soft_watchdog parameter 795ef246a21SUlrich Obergfell * 796ef246a21SUlrich Obergfell * caller | table->data points to | 'which' contains the flag(s) 797ef246a21SUlrich Obergfell * -------------------|-----------------------|----------------------------- 798ef246a21SUlrich Obergfell * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed 799ef246a21SUlrich Obergfell * | | with SOFT_WATCHDOG_ENABLED 800ef246a21SUlrich Obergfell * -------------------|-----------------------|----------------------------- 801ef246a21SUlrich Obergfell * proc_nmi_watchdog | nmi_watchdog_enabled | NMI_WATCHDOG_ENABLED 802ef246a21SUlrich Obergfell * -------------------|-----------------------|----------------------------- 803ef246a21SUlrich Obergfell * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED 804ef246a21SUlrich Obergfell */ 805ef246a21SUlrich Obergfell static int proc_watchdog_common(int which, struct ctl_table *table, int write, 806ef246a21SUlrich Obergfell void __user *buffer, size_t *lenp, loff_t *ppos) 807ef246a21SUlrich Obergfell { 808ef246a21SUlrich Obergfell int err, old, new; 809ef246a21SUlrich Obergfell int *watchdog_param = (int *)table->data; 810bcd951cfSThomas Gleixner 811ef246a21SUlrich Obergfell mutex_lock(&watchdog_proc_mutex); 812ef246a21SUlrich Obergfell 8138c073d27SUlrich Obergfell if (watchdog_suspended) { 8148c073d27SUlrich Obergfell /* no parameter changes allowed while watchdog is suspended */ 8158c073d27SUlrich 
Obergfell err = -EAGAIN; 8168c073d27SUlrich Obergfell goto out; 8178c073d27SUlrich Obergfell } 8188c073d27SUlrich Obergfell 819ef246a21SUlrich Obergfell /* 820ef246a21SUlrich Obergfell * If the parameter is being read return the state of the corresponding 821ef246a21SUlrich Obergfell * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the 822ef246a21SUlrich Obergfell * run state of the lockup detectors. 823ef246a21SUlrich Obergfell */ 824ef246a21SUlrich Obergfell if (!write) { 825ef246a21SUlrich Obergfell *watchdog_param = (watchdog_enabled & which) != 0; 826b8900bc0SFrederic Weisbecker err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 827ef246a21SUlrich Obergfell } else { 828ef246a21SUlrich Obergfell err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 829ef246a21SUlrich Obergfell if (err) 830ef246a21SUlrich Obergfell goto out; 831ef246a21SUlrich Obergfell 832ef246a21SUlrich Obergfell /* 833ef246a21SUlrich Obergfell * There is a race window between fetching the current value 834ef246a21SUlrich Obergfell * from 'watchdog_enabled' and storing the new value. During 835ef246a21SUlrich Obergfell * this race window, watchdog_nmi_enable() can sneak in and 836ef246a21SUlrich Obergfell * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'. 837ef246a21SUlrich Obergfell * The 'cmpxchg' detects this race and the loop retries. 838ef246a21SUlrich Obergfell */ 839ef246a21SUlrich Obergfell do { 840ef246a21SUlrich Obergfell old = watchdog_enabled; 841ef246a21SUlrich Obergfell /* 842ef246a21SUlrich Obergfell * If the parameter value is not zero set the 843ef246a21SUlrich Obergfell * corresponding bit(s), else clear it(them). 
844ef246a21SUlrich Obergfell */ 845ef246a21SUlrich Obergfell if (*watchdog_param) 846ef246a21SUlrich Obergfell new = old | which; 847ef246a21SUlrich Obergfell else 848ef246a21SUlrich Obergfell new = old & ~which; 849ef246a21SUlrich Obergfell } while (cmpxchg(&watchdog_enabled, old, new) != old); 850ef246a21SUlrich Obergfell 851ef246a21SUlrich Obergfell /* 852ef246a21SUlrich Obergfell * Update the run state of the lockup detectors. 853ef246a21SUlrich Obergfell * Restore 'watchdog_enabled' on failure. 854ef246a21SUlrich Obergfell */ 855ef246a21SUlrich Obergfell err = proc_watchdog_update(); 856ef246a21SUlrich Obergfell if (err) 857ef246a21SUlrich Obergfell watchdog_enabled = old; 858ef246a21SUlrich Obergfell } 859ef246a21SUlrich Obergfell out: 860ef246a21SUlrich Obergfell mutex_unlock(&watchdog_proc_mutex); 861ef246a21SUlrich Obergfell return err; 862ef246a21SUlrich Obergfell } 863ef246a21SUlrich Obergfell 864ef246a21SUlrich Obergfell /* 86583a80a39SUlrich Obergfell * /proc/sys/kernel/watchdog 86683a80a39SUlrich Obergfell */ 86783a80a39SUlrich Obergfell int proc_watchdog(struct ctl_table *table, int write, 86883a80a39SUlrich Obergfell void __user *buffer, size_t *lenp, loff_t *ppos) 86983a80a39SUlrich Obergfell { 87083a80a39SUlrich Obergfell return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED, 87183a80a39SUlrich Obergfell table, write, buffer, lenp, ppos); 87283a80a39SUlrich Obergfell } 87383a80a39SUlrich Obergfell 87483a80a39SUlrich Obergfell /* 87583a80a39SUlrich Obergfell * /proc/sys/kernel/nmi_watchdog 87683a80a39SUlrich Obergfell */ 87783a80a39SUlrich Obergfell int proc_nmi_watchdog(struct ctl_table *table, int write, 87883a80a39SUlrich Obergfell void __user *buffer, size_t *lenp, loff_t *ppos) 87983a80a39SUlrich Obergfell { 88083a80a39SUlrich Obergfell return proc_watchdog_common(NMI_WATCHDOG_ENABLED, 88183a80a39SUlrich Obergfell table, write, buffer, lenp, ppos); 88283a80a39SUlrich Obergfell } 88383a80a39SUlrich Obergfell 88483a80a39SUlrich 
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 *
 * A write updates the sample period; if the update cannot be applied,
 * the previous threshold and sample period are restored.
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * Snapshot the current threshold before the proc write overwrites
	 * it; the rollback below relies on this (table->data presumably
	 * points at 'watchdog_thresh' -- TODO confirm against the sysctl
	 * table definition).
	 */
	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (smpboot_update_cpumask_percpu_thread(
				    &watchdog_threads, &watchdog_cpumask) != 0)
				pr_err("cpumask update failed\n");
		}
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}

#endif /* CONFIG_SYSCTL */

/*
 * Boot-time setup: compute the sample period, pick the default watchdog
 * cpumask (housekeeping CPUs only when nohz_full is active, otherwise
 * all possible CPUs), and start the detectors if they are enabled.
 */
void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}