/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * equals zero.
 */
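/*
 * For example, with both detectors enabled 'watchdog_enabled' reads 3
 * (NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED); clearing bit 0 via
 * /proc/sys/kernel/nmi_watchdog leaves it at 2, i.e. soft lockup
 * detection only.
 */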
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)

static DEFINE_MUTEX(watchdog_proc_mutex);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;
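/*
 * Example of the counting semantics: if two subsystems suspend the
 * watchdog back to back, the first lockup_detector_suspend() call parks
 * the threads and leaves 'watchdog_suspended' at 1, the second merely
 * bumps it to 2; the threads are unparked only when the matching
 * lockup_detector_resume() calls bring the count back to 0.
 */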
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
static unsigned long hardlockup_allcpu_dumped;
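/*
 * Example boot command lines (illustrative; see the __setup() handlers
 * below):
 *   nmi_watchdog=panic	- panic on hard lockup
 *   nmi_watchdog=0	- disable the hard lockup detector
 *   nowatchdog		- disable both detectors
 *   nosoftlockup	- disable only the soft lockup detector
 */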
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
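/*
 * Worked example with the default watchdog_thresh of 10: the hard
 * lockup NMI period corresponds to 10 seconds of CPU cycles, while
 * get_softlockup_thresh() below returns 20, so a CPU must starve its
 * watchdog thread for more than 20 seconds before a soft lockup is
 * reported.
 */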
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 * With the default watchdog_thresh of 10 this works out to
	 * 20 * NSEC_PER_SEC / 5, i.e. a 4 second period.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}
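/*
 * Note for the touch interfaces above: a timestamp of 0 acts as a
 * sentinel. watchdog_timer_fn() treats it as "recently touched",
 * re-arms with a fresh timestamp and skips the lockup check for that
 * sample period.
 */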
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if (watchdog_enabled & SOFT_WATCHDOG_ENABLED) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
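/*
 * Example (hypothetical values): with watchdog_thresh = 10 the soft
 * threshold is 20 seconds. If the per-cpu timestamp was last touched
 * at t = 100s and it is now t = 125s, is_softlockup() returns 25,
 * the "stuck for 25s" duration printed by watchdog_timer_fn().
 */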
#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflowed.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();
		struct pt_regs *regs = get_irq_regs();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform all-CPU dump only once to avoid multiple hardlockups
		 * generating interleaving traces
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			panic("Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}
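		/*
		 * The report below looks like, e.g. (hypothetical values):
		 *   BUG: soft lockup - CPU#3 stuck for 23s! [loop:2145]
		 * followed by the module list, register and stack dumps.
		 */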
		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}
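/*
 * watchdog_enable() and watchdog_disable() always run on the CPU they
 * manage: they are wired up below as the smpboot thread's setup, park,
 * unpark and cleanup callbacks, so the per-cpu hrtimer and perf event
 * are (de)initialized from the right CPU.
 */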
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp.  If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;
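/*
 * A note on the detection scheme below: hw_nmi_get_sample_period()
 * converts 'watchdog_thresh' into a cycle count, so the perf NMI fires
 * roughly every watchdog_thresh seconds of CPU time. If two consecutive
 * NMIs see no hrtimer progress, is_hardlockup() reports a hard lockup.
 */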
static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * the hardware perf event. The watchdog() function checks the
	 * NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}

#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
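/*
 * The structure above is registered via
 * smpboot_register_percpu_thread_cpumask(), which spawns one
 * "watchdog/N" kthread per CPU in 'watchdog_cpumask'. Each thread
 * sleeps until watchdog_timer_fn() wakes it, runs watchdog() once to
 * touch the timestamp, then sleeps again.
 */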
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	get_online_cpus();
	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}
	put_online_cpus();

	return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	get_online_cpus();
	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
	put_online_cpus();
}

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}
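/*
 * Minimal usage sketch (hypothetical caller) for the suspend/resume
 * pair, e.g. code that needs temporary exclusive access to the PMU:
 *
 *	if (!lockup_detector_suspend()) {
 *		... reconfigure hardware ...
 *		lockup_detector_resume();
 *	}
 *
 * Calls may nest; see the 'watchdog_suspended' counter above.
 */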
/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}

static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}

static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

#ifdef CONFIG_SYSCTL
/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}
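/*
 * Example from userspace (hypothetical session): after
 *   echo 0 > /proc/sys/kernel/nmi_watchdog
 * bit 0 of 'watchdog_enabled' is clear, yet
 *   cat /proc/sys/kernel/watchdog
 * still reads 1 because the soft lockup detector remains enabled.
 */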
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);
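		/*
		 * Concrete instance of the race (hypothetical): the writer
		 * reads old = 3, watchdog_nmi_enable() fails on another CPU
		 * and clears bit 0 so 'watchdog_enabled' becomes 1; the
		 * cmpxchg expecting 3 then fails and the loop retries with
		 * a fresh value.
		 */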
		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;
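	/*
	 * Example: writing 30 here changes the sample period to
	 * 2 * 30s / 5 = 12s, and the park/unpark cycle in
	 * update_watchdog_all_cpus() re-arms the hrtimers and perf
	 * events with the new values.
	 */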
	/*
	 * Update the sample period. Restore on failure.
	 */
	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);
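		/*
		 * Example (hypothetical): on an 8-cpu system,
		 *   echo 0,2-3 > /proc/sys/kernel/watchdog_cpumask
		 * restricts the watchdog to CPUs 0, 2 and 3; threads on the
		 * remaining CPUs are parked by the update below.
		 */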
		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (smpboot_update_cpumask_percpu_thread(
				    &watchdog_threads, &watchdog_cpumask) != 0)
				pr_err("cpumask update failed\n");
		}
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}