/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>

#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_cpumask_backtrace */
static unsigned long backtrace_flag;

/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output duplicate CPU dump info.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_nmi_flush();

	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}

bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		pr_warn("NMI backtrace for cpu %d\n", cpu);
		if (regs)
			show_regs(regs);
		else
			dump_stack();
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif
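
/*
 * Illustrative sketch, not part of this file: roughly how an architecture
 * can hook into the helpers above when it defines
 * arch_trigger_cpumask_backtrace.  The IPI helper used here
 * (smp_send_nmi_backtrace_ipi) is a hypothetical placeholder; the real
 * delivery mechanism is arch specific.
 */
static void raise_backtrace_ipi(cpumask_t *mask)
{
	/*
	 * Send an NMI (or NMI-like) IPI to every CPU still set in @mask.
	 * nmi_trigger_cpumask_backtrace() passes a pointer to backtrace_mask,
	 * so bits cleared by nmi_cpu_backtrace() stay visible to the waiter.
	 */
	smp_send_nmi_backtrace_ipi(mask);	/* hypothetical arch helper */
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}

/*
 * The arch's NMI/IPI handler would then call nmi_cpu_backtrace(regs); a true
 * return value means this CPU's backtrace was printed and its mask bit
 * cleared, allowing the polling loop above to finish early.
 */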