/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>

#include <mach_ipi.h>

/* This keeps track of which cpu is the crashing cpu. */
static int crashing_cpu;

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct notifier_block *self,
			unsigned long val, void *data)
{
	struct pt_regs *regs;
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;
#endif
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/* Don't do anything if this handler is invoked on the crashing cpu.
	 * Otherwise the system will completely hang. The crashing cpu can
	 * get an NMI if the system was booted with the nmi_watchdog
	 * parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}

static void smp_send_nmi_allbutself(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(safe_smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, NMI_VECTOR);
}

static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};

static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_die_notifier(&crash_nmi_nb))
		return;		/* return what? */
	/* Ensure the new callback function is set before sending
	 * out the NMI.
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shoot down */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of the crashing cpu; it will be used in the
	 * NMI callback. */
	crashing_cpu = safe_smp_processor_id();
	nmi_shootdown_cpus();
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	/* Finally, save the register state of the crashing cpu itself. */
	crash_save_cpu(regs, safe_smp_processor_id());
}
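
/*
 * For context: a simplified, illustrative sketch (not the verbatim source) of
 * how the architecture-independent crash path ends up calling
 * machine_crash_shutdown() above before jumping into the pre-loaded capture
 * kernel. The real implementation lives in kernel/kexec.c; serialization
 * against a concurrent kexec is omitted here for brevity.
 */
#if 0	/* illustration only, not compiled */
void crash_kexec(struct pt_regs *regs)
{
	if (kexec_crash_image) {
		struct pt_regs fixed_regs;

		/* Snapshot the registers of the crashing context (or capture
		 * the current registers when regs is NULL). */
		crash_setup_regs(&fixed_regs, regs);

		/* Quiesce the machine: shoot down the other cpus, disable the
		 * APICs and HPET, and save this cpu's state (this file). */
		machine_crash_shutdown(&fixed_regs);

		/* Transfer control to the crash (capture) kernel that was
		 * loaded earlier via the kexec_load syscall. */
		machine_kexec(kexec_crash_image);
	}
}
#endif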