/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif
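
/*
 * Illustrative usage sketch: the example_* functions, the status_reg
 * parameter, and the polled DONE bit below are hypothetical callers,
 * not part of this header; only touch_nmi_watchdog(),
 * trigger_all_cpu_backtrace() and dump_stack() are real kernel
 * interfaces.  Guarded with #if 0 so the header compiles unchanged.
 */
#if 0
#include <linux/io.h>
#include <linux/printk.h>

/*
 * A polling loop that runs with interrupts disabled for longer than
 * watchdog_thresh must pet the watchdog: touch_nmi_watchdog() resets
 * the hardlockup timeout, and on the fallback path above it touches
 * the softlockup watchdog instead.
 */
static void example_poll_until_done(void __iomem *status_reg)
{
	while (!(readl(status_reg) & 0x1)) {	/* hypothetical DONE bit */
		cpu_relax();
		touch_nmi_watchdog();
	}
}

/*
 * The trigger_*_backtrace() helpers return false when the architecture
 * provides no arch_trigger_cpumask_backtrace(), so callers are expected
 * to fall back to a weaker mechanism, e.g. dumping only the local stack.
 */
static void example_dump_all_stacks(void)
{
	if (!trigger_all_cpu_backtrace())
		dump_stack();	/* fallback: current CPU only */
}
#endif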