/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>
#include <asm/nmi.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/seq_buf.h>

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Convert the watchdog threshold (in seconds) into a sample period in
 * CPU cycles: cpu_khz * 1000 cycles per second, times the threshold.
 */
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
        return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
static cpumask_t printtrace_mask;

#define NMI_BUF_SIZE            4096

struct nmi_seq_buf {
        unsigned char           buffer[NMI_BUF_SIZE];
        struct seq_buf          seq;
};

/* Safe printing in NMI context */
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

/* Print the inclusive [start, end] byte range of the per-CPU buffer. */
static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
{
        const char *buf = s->buffer + start;

        printk("%.*s", (end - start) + 1, buf);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
        struct nmi_seq_buf *s;
        int len;
        int cpu;
        int i;
        int this_cpu = get_cpu();

        if (test_and_set_bit(0, &backtrace_flag)) {
                /*
                 * If there is already a trigger_all_cpu_backtrace() in
                 * progress (backtrace_flag == 1), don't print the CPU
                 * dumps twice.
                 */
                put_cpu();
                return;
        }

        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
        if (!include_self)
                cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

        cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
        /*
         * Set up per_cpu seq_buf buffers that the NMIs running on the other
         * CPUs will write to.
         */
        for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
                s = &per_cpu(nmi_print_seq, cpu);
                seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
        }

        if (!cpumask_empty(to_cpumask(backtrace_mask))) {
                pr_info("sending NMI to %s CPUs:\n",
                        (include_self ? "all" : "other"));
                apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
        }

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
                touch_softlockup_watchdog();
        }

        /*
         * Now that all the NMIs have triggered, we can dump out their
         * back traces safely to the console.
         */
        for_each_cpu(cpu, &printtrace_mask) {
                int last_i = 0;

                s = &per_cpu(nmi_print_seq, cpu);
                len = seq_buf_used(&s->seq);
                if (!len)
                        continue;

                /* Print line by line. */
                for (i = 0; i < len; i++) {
                        if (s->buffer[i] == '\n') {
                                print_seq_line(s, last_i, i);
                                last_i = i + 1;
                        }
                }
                /* Check if there was a partial line. */
                if (last_i < len) {
                        print_seq_line(s, last_i, len - 1);
                        pr_cont("\n");
                }
        }

        clear_bit(0, &backtrace_flag);
        smp_mb__after_atomic();
        put_cpu();
}
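
/*
 * Illustrative note (a sketch of the generic wrappers in <linux/nmi.h>
 * for this kernel generation, not code from this file): when
 * arch_trigger_all_cpu_backtrace is defined, the generic helpers are
 * expected to resolve to the function above, e.g.
 *
 *      static inline bool trigger_all_cpu_backtrace(void)
 *      {
 *              arch_trigger_all_cpu_backtrace(true);
 *              return true;
 *      }
 *
 * so a caller such as the sysrq-l handler gets an NMI-driven all-CPU
 * dump, and can fall back to another method when the arch hook is not
 * available (the wrapper then returns false).
 */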

/*
 * It is not safe to call printk() directly from NMI handlers.
 * It may be fine when the NMI detected a lockup and we have no choice
 * but to do so, but sending an NMI to all other CPUs for a back trace
 * can also be requested with sysrq-l. We don't want that to lock up,
 * which can happen if the NMI interrupts a printk in progress.
 *
 * Instead, we redirect vprintk() to this nmi_vprintk(), which writes
 * the content into a per-CPU seq_buf buffer. Then, when the NMIs are
 * all done, we can safely dump the contents of the seq_buf with a
 * printk() from non-NMI context.
 */
static int nmi_vprintk(const char *fmt, va_list args)
{
        struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
        unsigned int len = seq_buf_used(&s->seq);

        seq_buf_vprintf(&s->seq, fmt, args);
        return seq_buf_used(&s->seq) - len;
}

static int
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
        int cpu;

        cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                printk_func_t printk_func_save = this_cpu_read(printk_func);

                /* Replace printk to write into the NMI seq */
                this_cpu_write(printk_func, nmi_vprintk);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                show_regs(regs);
                this_cpu_write(printk_func, printk_func_save);

                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return NMI_HANDLED;
        }

        return NMI_DONE;
}
NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler);

static int __init register_trigger_all_cpu_backtrace(void)
{
        register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
                                0, "arch_bt");
        return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif
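
/*
 * For context, a rough sketch (an assumption about the printk core of
 * this kernel generation, kernel/printk/printk.c, not code from this
 * file) of why the redirection in the handler above works: printk()
 * dispatches through the per-CPU printk_func pointer, so while it
 * points at nmi_vprintk() every printk() issued on this CPU lands in
 * the per-CPU seq_buf instead of taking the console/log buffer locks:
 *
 *      asmlinkage __visible int printk(const char *fmt, ...)
 *      {
 *              printk_func_t vprintk_func;
 *              va_list args;
 *              int r;
 *
 *              va_start(args, fmt);
 *              preempt_disable();
 *              vprintk_func = this_cpu_read(printk_func);
 *              r = vprintk_func(fmt, args);
 *              preempt_enable();
 *              va_end(args);
 *
 *              return r;
 *      }
 */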