/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(true);

	return true;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(false);
	return true;
}

/* generic implementation */
void nmi_trigger_all_cpu_backtrace(bool include_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif
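
/*
 * Usage sketch for touch_nmi_watchdog() (illustrative only, not part of
 * this header): code that intentionally spins with interrupts disabled
 * for a long time can pet the watchdog from inside its loop. The
 * function, register base and STATUS_READY flag below are hypothetical;
 * readl() and cpu_relax() are the usual kernel primitives.
 *
 *	static void wait_for_device_ready(void __iomem *base)
 *	{
 *		while (!(readl(base) & STATUS_READY)) {
 *			touch_nmi_watchdog();
 *			cpu_relax();
 *		}
 *	}
 *
 * When neither CONFIG_HAVE_NMI_WATCHDOG nor CONFIG_HARDLOCKUP_DETECTOR
 * is set, the inline fallback above degrades to
 * touch_softlockup_watchdog(), so the call is safe in all configurations.
 */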
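
/*
 * Usage sketch for trigger_all_cpu_backtrace() (illustrative only): the
 * boolean return value lets callers detect that the architecture provides
 * no arch_trigger_all_cpu_backtrace() and fall back to another mechanism:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();
 *
 * where dump_stack() stands in for whatever single-CPU fallback the
 * caller prefers; this header does not mandate a particular one.
 */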