/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
#else
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
extern int soft_watchdog_enabled;
extern atomic_t watchdog_park_in_progress;
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
#else
#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
static inline void arch_touch_nmi_watchdog(void) {}
#endif
#endif

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
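/*
 * Example (illustrative only, not part of the kernel API): a driver that
 * deliberately keeps interrupts off while polling slow hardware can call
 * touch_nmi_watchdog() on each iteration so that neither the hard nor the
 * soft lockup detector fires.  The device type, register count and
 * slow_hw_read() helper below are hypothetical:
 *
 *	static void poll_slow_registers(struct my_device *dev)
 *	{
 *		unsigned long flags;
 *		int i;
 *
 *		local_irq_save(flags);
 *		for (i = 0; i < MY_NR_SLOW_REGS; i++) {
 *			slow_hw_read(dev, i);
 *			touch_nmi_watchdog();
 *		}
 *		local_irq_restore(flags);
 *	}
 */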
/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
extern int nmi_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
extern int __read_mostly watchdog_suspended;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
    defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif
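/*
 * Illustrative usage (not part of this header): because the trigger_*()
 * backtrace helpers return false when the architecture provides no NMI
 * backtrace support, callers are expected to fall back to some other
 * mechanism, for instance:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();	(fallback: only the current CPU is dumped)
 */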