xref: /openbmc/linux/arch/x86/kernel/crash.c (revision cc8bbe1a)
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN   4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * GART, crash reserved region etc.
	 */
	unsigned int max_nr_ranges;
	unsigned long gart_start, gart_end;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. The callback pointer is assigned when the
 * kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

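/*
 * Per-CPU NMI callback used during a crash: save this CPU's registers,
 * VMCLEAR any loaded VMCSs, disable virtualization extensions and
 * Intel PT, and shut down the local APIC.
 */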
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

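/*
 * Send NMIs to the other CPUs so that each one runs the callback above,
 * then disable the local APIC on this CPU as well.
 */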
static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
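/* Count the System RAM ranges reported by walk_system_ram_res(). */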
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;

	ced->gart_start = start;
	ced->gart_end = end;

	/* Not expecting more than 1 gart aperture */
	return 1;
}


/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/*
	 * We don't create ELF headers for the GART aperture, as an attempt
	 * to dump this memory in the second kernel leads to a hang/crash.
	 * If a GART aperture is present, that region needs to be excluded,
	 * which may require an extra phdr.
	 */
	walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
				ced, get_gart_ranges_callback);

	/*
	 * If we have a GART region, excluding it could split a memory
	 * range, resulting in an extra header. Account for that.
	 */
	if (ced->gart_end)
		ced->max_nr_ranges++;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split is possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}

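/*
 * Exclude the region [mstart, mend] from the ranges in @mem: ranges fully
 * covered by it are dropped, partially covered ranges are trimmed, and a
 * range that strictly contains it is split in two, with the second half
 * inserted after the original range.
 */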
static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift the rest of the ranges to the left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
							mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If no split happened, we are done */
	if (!temp_range.end)
		return 0;

	/* A split happened; make sure there is room for the new range */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}

/*
 * Look for any unwanted ranges between mstart and mend and remove them. This
 * may split ranges; the resulting ranges are placed in the ced->mem.ranges[]
 * array.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
		if (ret)
			return ret;
	}

	/* Exclude GART region */
	if (ced->gart_end) {
		ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
		if (ret)
			return ret;
	}

	return ret;
}

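/*
 * Called for each System RAM resource: exclude unwanted regions from it
 * and emit one PT_LOAD program header per remaining range.
 */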
static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset  = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}

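/*
 * Allocate a buffer for the crash ELF core headers and fill in the ELF
 * header, one PT_NOTE phdr per present CPU, a PT_NOTE phdr for vmcoreinfo,
 * a PT_LOAD phdr for the kernel text mapping (on x86_64), and PT_LOAD
 * phdrs for System RAM. The buffer and its size are returned via @addr
 * and @sz.
 */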
static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area on x86_64 (ffffffff80000000 - ffffffffa0000000),
	 * which tools like gdb appear to require. So the same physical
	 * memory will be mapped by two ELF headers: one with kernel text
	 * virtual addresses and the other with __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
			prepare_elf64_ram_headers_callback);
	if (ret < 0) {
		/* Don't leak the header buffer on failure */
		vfree(buf);
		return ret;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

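/*
 * Append one e820 entry to the boot_params passed to the crash kernel,
 * failing if the e820 map is already full.
 */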
static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
			sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}

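/* Add one resource range to the crash kernel's e820 map as cmd->type. */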
static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

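/*
 * Carve the backup region and the ELF header region out of
 * [mstart, mend]; the remaining ranges are left in @cmem.
 */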
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
						crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

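/* Record the first System RAM range found as the backup source region. */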
static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}

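/*
 * Load the kexec segments needed for a crash dump: a zero-filled backup
 * segment for the low 640K and a segment holding the prepared ELF core
 * headers.
 */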
int crash_load_segments(struct kimage *image)
{
	unsigned long src_start, src_sz, elf_sz;
	void *elf_addr;
	int ret;

	/*
	 * Determine and load a segment for the backup area. The first 640K
	 * of RAM is the backup source.
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	src_start = image->arch.backup_src_start;
	src_sz = image->arch.backup_src_sz;

	/* Add backup segment. */
	if (src_sz) {
		/*
		 * Ideally there is no source for the backup segment; it is
		 * copied over in purgatory after the crash. Just add a
		 * zero-filled segment for now so that the checksum logic
		 * works.
		 */
		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
				       sizeof(crash_zero_bytes), src_sz,
				       PAGE_SIZE, 0, -1, 0,
				       &image->arch.backup_load_addr);
		if (ret)
			return ret;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr, src_start, src_sz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
	if (ret)
		return ret;

	image->arch.elf_headers = elf_addr;
	image->arch.elf_headers_sz = elf_sz;

	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
			ELF_CORE_HEADER_ALIGN, 0, -1, 0,
			&image->arch.elf_load_addr);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, elf_sz, elf_sz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */