// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;


/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in make_task_dead() path, each of which
	 * corresponds to each of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);
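/*
 * Illustrative note (not new behaviour): kexec_crash_loaded() is the
 * helper backing the /sys/kernel/kexec_crash_loaded sysfs file, so a
 * user-space check for a loaded crash kernel is roughly:
 *
 *	$ cat /sys/kernel/kexec_crash_loaded
 *	1
 */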
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);
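/*
 * A sketch of what sanity_check_segment_list() below enforces on each
 * kexec_segment (illustrative values, not taken from a real image):
 *
 *	struct kexec_segment seg = {
 *		.buf   = user_buffer,	// source data in user space
 *		.bufsz = 0x1000,	// must not exceed memsz
 *		.mem   = 0x100000,	// page-aligned destination
 *		.memsz = 0x2000,	// page-aligned size
 *	};
 *
 * Destinations may not overlap each other, must lie below
 * KEXEC_DESTINATION_MEMORY_LIMIT, and in total may not consume more
 * than half of system RAM.
 */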
int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stomps on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
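/*
 * Note (descriptive, not new behaviour): kimage_alloc_pages() stashes
 * the allocation order in page_private() of the head page, which is
 * what lets kimage_free_pages() above free a multi-page allocation
 * given only the head page.  Callers therefore typically collect pages
 * on a local list and hand them back in one go, e.g.:
 *
 *	LIST_HEAD(extra_pages);
 *	list_add(&page->lru, &extra_pages);
 *	...
 *	kimage_free_page_list(&extra_pages);
 *
 * as kimage_alloc_normal_control_pages() below does for pages that
 * turned out to be unusable.
 */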
void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn = page_to_boot_pfn(pages);
		epfn = pfn + count;
		addr = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
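/*
 * The crash variant below carves control pages directly out of the
 * reserved crashk_res region instead of going through the page
 * allocator.  The hole search rounds the candidate start up to the
 * allocation size; for example (illustrative numbers only), with
 * order = 1 on 4K pages, size = 0x2000, and a previous control_page
 * of 0x1000:
 *
 *	hole_start = (0x1000 + 0x1fff) & ~0x1fff = 0x2000;
 *	hole_end   = 0x2000 + 0x2000 - 1        = 0x3fff;
 */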
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}
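/*
 * Illustrative caller (assumed, see the generic loaders in kexec.c and
 * kexec_file.c rather than this file): the control code buffer is
 * allocated at load time roughly as
 *
 *	image->control_code_page = kimage_alloc_control_pages(image,
 *				get_order(KEXEC_CONTROL_PAGE_SIZE));
 *
 * so the image type chosen at load time selects between the two
 * allocators above.
 */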
int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory.  As we have arch_kexec_protect_crashkres()
	 * after the kexec syscall, we naturally protect it from
	 * write (even read) access under the kernel direct mapping.
	 * But on the other hand, we still need to operate on it when
	 * a crash happens, to generate the vmcoreinfo note; hence we
	 * rely on vmap for this purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}
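/*
 * For illustration, a tiny kimage entry list as walked by
 * for_each_kimage_entry() above might look like this (hypothetical
 * addresses on 4K pages; the low bits are the IND_* flags):
 *
 *	0x0000000000100001	// IND_DESTINATION: copy cursor = 0x100000
 *	0x0000000000a00008	// IND_SOURCE: source page at 0xa00000
 *	0x0000000000b00002	// IND_INDIRECTION: list continues at 0xb00000
 *	...
 *	0x0000000000000004	// IND_DONE: end of list
 */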
void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might be hit
	 * if an error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
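/*
 * Note on the walk above (descriptive, not new behaviour): an
 * IND_DESTINATION entry sets the copy cursor and every subsequent
 * IND_SOURCE entry implicitly advances it by PAGE_SIZE, so
 * kimage_dst_used() recomputes each source page's destination on the
 * fly rather than storing it.  E.g. with a destination entry of
 * 0x100000, the first source entry maps to 0x100000, the second to
 * 0x101000, and so on (4K pages assumed).
 */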
static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
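/*
 * Worked example for the chunking above (illustrative numbers): with
 * maddr = 0x100800 and mbytes = 0x1000 on 4K pages,
 *
 *	mchunk = min(0x1000, PAGE_SIZE - (0x100800 & ~PAGE_MASK))
 *	       = min(0x1000, 0x800) = 0x800;
 *
 * so the copy is split at the page boundary and the loop takes two
 * iterations.  uchunk additionally never exceeds the bytes remaining
 * in the source buffer; once ubytes is exhausted, the rest of the
 * destination stays zero-filled from clear_page().
 */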
static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;
#ifdef CONFIG_SYSCTL
static struct ctl_table kexec_core_sysctls[] = {
	{
		.procname	= "kexec_load_disabled",
		.data		= &kexec_load_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init kexec_core_sysctl_init(void)
{
	register_sysctl_init("kernel", kexec_core_sysctls);
	return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif
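/*
 * Illustrative note on the sysctl table above: because extra1 and
 * extra2 are both SYSCTL_ONE, the only value proc_dointvec_minmax
 * will accept is "1", making the knob one-way (hypothetical shell
 * usage):
 *
 *	# echo 1 > /proc/sys/kernel/kexec_load_disabled	  # allowed
 *	# echo 0 > /proc/sys/kernel/kexec_load_disabled	  # rejected
 */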
/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.common.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
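/*
 * For illustration, the per-cpu note buffer filled in by
 * crash_save_cpu() above ends up laid out as a standard ELF note,
 * roughly:
 *
 *	struct elf_note hdr;	// n_namesz, n_descsz, n_type = NT_PRSTATUS
 *	"CORE\0" (4-byte padded)	// KEXEC_CORE_NOTE_NAME
 *	struct elf_prstatus (4-byte padded)
 *	empty terminating note from final_note()
 *
 * which is the format dump tools (e.g. makedumpfile) expect to find
 * via the per-cpu crash_notes addresses exported through sysfs.
 */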
static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based.  vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages.  In that
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs.  Here round up the size of crash_notes
	 * to the nearest power of two and pass it to __alloc_percpu as
	 * the align value.  This makes sure crash_notes is allocated
	 * inside one physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break the compile if size is bigger than PAGE_SIZE, since
	 * crash_notes would then definitely span 2 pages.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);


/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case).  However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}