xref: /openbmc/linux/arch/x86/kernel/crash.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>
#include <asm/reboot.h>

#include <mach_ipi.h>

/* This keeps track of which cpu is the crashing cpu. */
static int crashing_cpu;

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static atomic_t waiting_for_crash_ipi;

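/*
 * NMI notifier callback run on each CPU hit by the shootdown NMI sent from
 * nmi_shootdown_cpus().  Every CPU other than the crashing one saves its
 * register state for the dump, disables its local APIC, decrements
 * waiting_for_crash_ipi to acknowledge, and then halts.
 */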
static int crash_nmi_callback(struct notifier_block *self,
			unsigned long val, void *data)
{
	struct pt_regs *regs;
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;
#endif
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/* Don't do anything if this handler is invoked on the crashing cpu.
	 * Otherwise, the system will completely hang. The crashing cpu can
	 * get an NMI if the system was initially booted with the
	 * nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

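	/*
	 * On 32-bit, an NMI taken in kernel mode does not provide a complete
	 * frame (no ss/sp), so build a fixed-up copy of the registers before
	 * saving them.
	 */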
#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}

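/* Send an NMI to every online CPU except the one executing this code. */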
static void smp_send_nmi_allbutself(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(safe_smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, NMI_VECTOR);
}

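/* Notifier block registered on the die chain by nmi_shootdown_cpus(). */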
static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};

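/*
 * Halt all other CPUs by sending them an NMI so they stop running before
 * the crash kernel takes over.  Waits up to one second for every CPU to
 * acknowledge via waiting_for_crash_ipi.
 */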
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_die_notifier(&crash_nmi_nb))
		return;		/* return what? */
	/* Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shoot down */
}
#endif

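/*
 * Architecture hook for the kexec crash path: quiesce the other CPUs and
 * the interrupt hardware, then save this CPU's registers for the dump.
 */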
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of the crashing cpu. Will be used in the NMI callback. */
	crashing_cpu = safe_smp_processor_id();
	nmi_shootdown_cpus();
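	/*
	 * With the other cpus stopped, shut down the local APIC and, where
	 * configured, the IO-APIC and HPET before handing control to the
	 * kexec'd kernel.
	 */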
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}