xref: /openbmc/linux/arch/x86/kernel/crash.c (revision 6f599d84)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. The callback function pointer is assigned
 * when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
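
/*
 * For reference, a registering module is expected to publish its callback
 * with rcu_assign_pointer() and retract it before unloading, roughly like
 * this (illustrative sketch only; the local callback name is an assumption):
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   crash_vmclear_local_loaded_vmcss);
 *	...
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 */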

/*
 * When the crashkernel option is specified, only use the low
 * 1M for the real mode trampoline.
 */
void __init crash_reserve_low_1M(void)
{
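	/*
	 * A negative return from cmdline_find_option() means "crashkernel"
	 * was not given on the command line, so there is nothing to reserve.
	 */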
	if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
		return;

	memblock_reserve(0, 1<<20);
	pr_info("Reserving the low 1M of memory for crashkernel\n");
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

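/*
 * NMI shootdown callback, run on every CPU except the crashing one:
 * save the CPU's register state for the dump, quiesce virtualization
 * and Intel PT, then mask the local APIC.
 */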
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

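	/* The NMI callback handled the other CPUs; mask this CPU's APIC too. */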
	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

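	/*
	 * This can be reached twice, from panic() and again from
	 * native_machine_crash_shutdown(); only act on the first call.
	 */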
	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Booting the kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

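/* Zero-filled source buffer for the backup segment, see crash_load_segments(). */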
static unsigned long crash_zero_bytes;

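/* walk_system_ram_res() callback: count the System RAM ranges. */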
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare ELF headers for RAM regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of the crash region and/or crashk_low_res may cause
	 * another range split, so add two extra slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This may cause a range to be split; split ranges end up in the
 * cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude the crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
							crashk_low_res.end);
	}

	return ret;
}

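/* walk_system_ram_res() callback: record one System RAM range in cmem. */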
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare ELF headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64-bit headers */
	ret = crash_prepare_elf64_headers(cmem,
				IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches the backup region, adjust the offset to the
	 * backup segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
				phdr->p_paddr == image->arch.backup_src_start &&
				phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}

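/*
 * Append one entry to the e820 table handed to the second kernel.
 * Returns 1 when the zero page e820 table is already full.
 */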
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
			sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

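/* walk_iomem_res_desc() callback: add a resource as an e820 entry of cmd->type. */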
static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

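/*
 * Seed cmem with the single range [mstart, mend] and carve out the backup
 * region and the ELF header region, both of which live inside crashk_res.
 */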
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude the backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude the ELF header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add the first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add e820 reserved ranges */
	cmd.type = E820_TYPE_RESERVED;
	flags = IORESOURCE_MEM;
	walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
			   memmap_entry_callback);

	/* Add the crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add the rest to the memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
						crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If the entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/*
	 * Expecting only one range for the backup region; a non-zero
	 * return value stops walk_system_ram_res() after the first match.
	 */
	return 1;
}

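/*
 * Load the crash-specific segments: a zero-filled backup segment (later
 * filled by purgatory with the first 640K of RAM) and the ELF core
 * headers that describe memory for the dump.
 */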
int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for the backup area. The first
	 * 640K RAM region is the backup source.
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add the backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for the backup segment. It is
		 * copied in purgatory after the crash. Just add a zero-filled
		 * segment for now to make sure the checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare ELF headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */