/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock.
	 */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of crash region and/or crashk_low_res may cause
	 * another range split. So add extra two slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(sizeof(struct crash_mem) +
		       sizeof(struct crash_mem_range) * nr_ranges);
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * may lead to range splits; the resulting ranges are put in cmem->ranges[].
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				  prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem,
					  IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == image->arch.backup_src_start &&
		    phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };
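
	/*
	 * kbuf is reused for each kexec_add_buffer() call below; buf_min and
	 * buf_max bound where a segment may be placed, and top_down selects
	 * whether the search for a hole starts from the top of that window.
	 */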
	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */