/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents the number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * Used to VMCLEAR all the VMCSs loaded on the processor. The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
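/*
 * For illustration, a minimal sketch of how a VMX host module would
 * register and unregister the VMCLEAR callback above. The callback name
 * my_vmclear_local_vmcss is hypothetical; the pointer handling mirrors
 * what kvm_intel does on module init/exit:
 *
 *	static void my_vmclear_local_vmcss(void)
 *	{
 *		// VMCLEAR every VMCS on this CPU's loaded-VMCS list
 *	}
 *
 *	// on module init:
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   my_vmclear_local_vmcss);
 *
 *	// on module exit:
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 */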
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Booting the kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split is possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}

static int exclude_mem_range(struct crash_mem *mem,
			     unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split range to the array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
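/*
 * A worked example of exclude_mem_range() (the addresses are illustrative
 * only): starting from the single range [0x1000000, 0x7ffffff], excluding
 * [0x3000000, 0x3ffffff] splits it into [0x1000000, 0x2ffffff] and
 * [0x4000000, 0x7ffffff]; excluding [0x1000000, 0x1ffffff] instead would
 * merely truncate it to [0x2000000, 0x7ffffff]; excluding the whole range
 * would drop it and shift the remaining entries left.  Each exclusion can
 * grow nr_ranges by at most one, which is why fill_up_crash_elf_data()
 * budgets one extra slot per possible exclusion.
 */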
/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This might lead to splits; any ranges created by a split are put in the
 * ced->mem.ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
				     unsigned long long mstart,
				     unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches the backup region, adjust the offset to
		 * point at the backup segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}
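/*
 * A worked sizing example for prepare_elf64_headers() below (the numbers
 * are illustrative): with 8 possible CPUs, 4 system RAM ranges and both
 * crashk regions reserved, max_nr_ranges is 4 + 1 + 1 = 6, so
 * nr_phdr = (8 + 1) + 6 + 1 = 16 and the buffer needs
 * sizeof(Elf64_Ehdr) + 16 * sizeof(Elf64_Phdr) = 64 + 896 = 960 bytes,
 * which ALIGN() rounds up to the 4096-byte ELF_CORE_HEADER_ALIGN.
 */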
static int prepare_elf64_headers(struct crash_elf_data *ced,
				 void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area on x86_64 (ffffffff80000000 - ffffffffa0000000),
	 * which tools like gdb appear to require. So the same physical
	 * memory is mapped by two elf headers: one carries the kernel text
	 * virtual addresses and the other the __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
				  prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}
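/*
 * For reference, the header buffer built by prepare_elf64_headers() ends
 * up laid out as follows (N is the number of present CPUs; the final
 * PT_LOAD count depends on how many RAM ranges survive exclusion):
 *
 *	Elf64_Ehdr
 *	PT_NOTE x N		per-cpu crash_notes
 *	PT_NOTE			vmcoreinfo
 *	PT_LOAD			kernel text mapping (x86_64 only)
 *	PT_LOAD x ranges	system RAM, filled in by the callback
 */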
static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
	       sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
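/*
 * The e820 map handed to the crash kernel by the function above thus
 * contains, in order: the backed-up low 640K as RAM, any ACPI table and
 * ACPI NVS regions, crashk_low_res (if set) as RAM, and finally the
 * crashk_res reservation minus the backup and elf header segments.
 */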
static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	unsigned long src_start, src_sz, elf_sz;
	void *elf_addr;
	int ret;

	/*
	 * Determine and load a segment for the backup area. The first 640K
	 * of RAM is the backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	src_start = image->arch.backup_src_start;
	src_sz = image->arch.backup_src_sz;

	/* Add backup segment. */
	if (src_sz) {
		/*
		 * Ideally there is no source for the backup segment; it is
		 * copied in purgatory after the crash. Just add a zero-filled
		 * segment for now to make sure the checksum logic works fine.
		 */
		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
				       sizeof(crash_zero_bytes), src_sz,
				       PAGE_SIZE, 0, -1, 0,
				       &image->arch.backup_load_addr);
		if (ret)
			return ret;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr, src_start, src_sz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
	if (ret)
		return ret;

	image->arch.elf_headers = elf_addr;
	image->arch.elf_headers_sz = elf_sz;

	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
			       ELF_CORE_HEADER_ALIGN, 0, -1, 0,
			       &image->arch.elf_load_addr);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, elf_sz, elf_sz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */