/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>

#ifdef CONFIG_X86_32
#include <mach_ipi.h>
#else
#include <asm/mach_apic.h>
#endif

/* This keeps track of which CPU is the crashing one. */
static int crashing_cpu;

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct notifier_block *self,
			unsigned long val, void *data)
{
	struct pt_regs *regs;
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;
#endif
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/* Don't do anything if this handler is invoked on the crashing CPU.
	 * Otherwise the system will hang completely. The crashing CPU can
	 * get an NMI if the system was booted with the nmi_watchdog
	 * parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}

/* Send an NMI IPI to every online CPU except the current one. */
static void smp_send_nmi_allbutself(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(safe_smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, NMI_VECTOR);
}

static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};

/* Halt all other CPUs by sending them an NMI, waiting up to one second
 * for them to acknowledge via waiting_for_crash_ipi.
 */
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_die_notifier(&crash_nmi_nb))
		return;		/* return what? */
	/* Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of the crashing CPU; it is used in the NMI callback. */
	crashing_cpu = safe_smp_processor_id();
	nmi_shootdown_cpus();
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}