xref: /openbmc/linux/arch/s390/kernel/machine_kexec.c (revision 8730046c)
/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;

#ifdef CONFIG_CRASH_DUMP

/*
 * PM notifier callback for kdump
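 *
 * The crashkernel region is kept read-only while a crash kernel is
 * loaded (see crash_protect_pages() below); drop that protection
 * around suspend/hibernation so the region stays writable while the
 * system image is saved and restored, and re-protect it afterwards.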
 */
static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
			       void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		if (kexec_crash_image)
			arch_kexec_protect_crashkres();
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

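/*
 * Register the PM notifier so the crashkernel protection is toggled
 * around suspend and hibernation transitions.
 */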
static int __init machine_kdump_pm_init(void)
{
	pm_notifier(machine_kdump_pm_cb, 0);
	return 0;
}
arch_initcall(machine_kdump_pm_init);

/*
 * Reset the system, copy boot CPU registers to absolute zero,
 * and jump to the kdump image
 */
static void __do_machine_kdump(void *image)
{
	int (*start_kdump)(int);
	unsigned long prefix;

	/* store_status() saved the prefix register to lowcore */
	prefix = (unsigned long) S390_lowcore.prefixreg_save_area;

	/* Now do the reset */
	s390_reset_system();

	/*
	 * Copy the dump CPU's store status info to absolute zero.
	 * This needs to be done *after* s390_reset_system() has set
	 * the prefix register of this CPU to zero.
	 */
	memcpy((void *) __LC_FPREGS_SAVE_AREA,
	       (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);

	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	start_kdump = (void *)((struct kimage *) image)->start;
	start_kdump(1);

	/* Die if start_kdump returns */
	disabled_wait((unsigned long) __builtin_return_address(0));
}

/*
 * Start kdump: create an LGR log entry, store status of all CPUs and
 * branch to __do_machine_kdump.
 */
static noinline void __machine_kdump(void *image)
{
	int this_cpu, cpu;

	lgr_info_log();
	/* Get status of the other CPUs */
	this_cpu = smp_find_processor_id(stap());
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		if (smp_store_status(cpu))
			continue;
	}
	/* Store status of the boot CPU */
	if (MACHINE_HAS_VX)
		save_vx_regs((void *) &S390_lowcore.vector_save_area);
	/*
	 * To create a good backchain for this CPU in the dump store_status
	 * is passed the address of a function. The address is saved into
	 * the PSW save area of the boot CPU and the function is invoked as
	 * a tail call of store_status. The backchain in the dump will look
	 * like this:
	 *   restart_int_handler ->  __machine_kexec -> __do_machine_kdump
	 * The call to store_status() will not return.
	 */
	store_status(__do_machine_kdump, image);
}
#endif

/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
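 * (a call with parameter "1", as done in __do_machine_kdump(), starts
 * the dump). A non-zero return value means the checksums over the
 * pre-loaded kdump segments are still intact; DAT is switched off
 * around the call, presumably because the purgatory code runs without
 * dynamic address translation.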
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	return rc ? 0 : -EINVAL;
#else
	return -EINVAL;
#endif
}

#ifdef CONFIG_CRASH_DUMP

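/*
 * Called when the crashkernel reservation is shrunk: hand the freed
 * page range back to the page allocator and record the remaining
 * reserved area (or 0/0 if nothing is left) in the os_info block.
 */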
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr, size;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	size = begin - crashk_res.start;
	if (size)
		os_info_crashkernel_add(crashk_res.start, size);
	else
		os_info_crashkernel_add(0, 0);
}

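/*
 * Map the crashkernel region read-only (protect != 0) or read-write
 * (protect == 0) to guard a pre-loaded crash kernel image against
 * accidental overwrites.
 */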
static void crash_protect_pages(int protect)
{
	unsigned long size;

	if (!crashk_res.end)
		return;
	size = resource_size(&crashk_res);
	if (protect)
		set_memory_ro(crashk_res.start, size >> PAGE_SHIFT);
	else
		set_memory_rw(crashk_res.start, size >> PAGE_SHIFT);
}

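/*
 * Arch hooks used by the common kexec code and by the PM notifier
 * above to toggle the write protection of the crashkernel region.
 */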
void arch_kexec_protect_crashkres(void)
{
	crash_protect_pages(1);
}

void arch_kexec_unprotect_crashkres(void)
{
	crash_protect_pages(0);
}

#endif

/*
 * Give back memory to the hypervisor before a new kdump image is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}

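/*
 * Prepare a kexec image: crash images are handled by
 * machine_kexec_prepare_kdump(), kexec from a named saved system (NSS)
 * is rejected because the NSS is read-only, and for the default image
 * type the relocate_kernel assembler code is copied into the control
 * page allocated by the common kexec code.
 */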
int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	/* Can't replace kernel image since it is read-only. */
	if (ipl_flags & IPL_NSS_VALID)
		return -EOPNOTSUPP;

	if (image->type == KEXEC_TYPE_CRASH)
		return machine_kexec_prepare_kdump();

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to. */
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}

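/*
 * Nothing to undo here: machine_kexec_prepare() only copied code into
 * the control page, which is freed by the common kexec code.
 */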
void machine_kexec_cleanup(struct kimage *image)
{
}

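/*
 * Export s390 specific symbols (the lowcore pointer array and
 * high_memory) via the vmcoreinfo note for dump analysis tools.
 */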
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}

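/*
 * Both shutdown callbacks are no-ops on s390: stopping the other CPUs
 * and resetting the system happen later, in machine_kexec() and
 * __machine_kexec().
 */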
void machine_shutdown(void)
{
}

void machine_crash_shutdown(struct pt_regs *regs)
{
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	s390_reset_system();
	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);

	/* Die if kexec returns */
	disabled_wait((unsigned long) __builtin_return_address(0));
}

/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04); /* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
		__machine_kdump(data);
#endif
	__do_machine_kexec(data);
}

/*
 * Do either kdump or normal kexec. In the kdump case purgatory is asked
 * first whether the kdump checksums are still valid.
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	smp_send_stop();
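	/*
	 * All other CPUs are stopped now; run __machine_kexec() on the
	 * IPL CPU, which performs the reset and branches to the new
	 * kernel (or to the kdump image).
	 */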
	smp_call_ipl_cpu(__machine_kexec, image);
}
293