// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/frame.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;


/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the do_exit() path, each of which
	 * corresponds to one of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things could happen with no
	 * easy explanation as one segment stomps on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes do not exceed our memory sizes.
	 * This should always be the case, and it is easier to check
	 * up front than to be surprised later on.
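	 *
	 * (bufsz is the number of bytes actually supplied for the segment,
	 * while memsz is its in-memory footprint; the loaders below
	 * zero-fill any trailing memsz - bufsz bytes, so bufsz <= memsz is
	 * the only combination that makes sense.)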
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

int kimage_is_destination_range(struct kimage *image,
				unsigned long start,
				unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn = page_to_boot_pfn(pages);
		epfn = pfn + count;
		addr = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
		    kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
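	 *
	 * For example, an order-1 request looks for a hole of two pages,
	 * aligned to two pages, starting at image->control_page; whenever
	 * a candidate overlaps one of the preloaded segments, the search
	 * skips to the first suitably aligned address past that segment.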
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory.  Because arch_kexec_protect_crashkres() is
	 * called after the kexec syscall, this copy is naturally
	 * protected from write (even read) access under the kernel
	 * direct mapping.  However, we still need to write to it when
	 * a crash happens, to generate the vmcoreinfo note, so we rely
	 * on a vmap alias for that purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might be hit
	 * if an error occurred much later, after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
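			 *
			 * (After the copy_highpage() above, the data that was
			 * headed for this address now sits in the page that
			 * occupies it, preserving the invariant, and the
			 * displaced old page is free to stand in for the
			 * current request.)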
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
						<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
			       PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
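	 * Unlike the normal path, no pages are allocated here: maddr
	 * already lies inside the reserved crash region, so each page is
	 * simply looked up, mapped and written in place.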
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
			       PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

int kimage_load_segment(struct kimage *image,
			struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		printk_safe_flush_on_panic();
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
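		 * __crash_kexec() returns without rebooting when no crash
		 * image is loaded (or the mutex is contended), so the
		 * hand-off must be undone here.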
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based.  vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages.  In that
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs.  So round up the size of crash_notes to
	 * the nearest power of two and pass it to __alloc_percpu as the
	 * align value; this makes sure crash_notes is allocated inside one
	 * physical page.
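	 *
	 * For example (hypothetical size), if sizeof(note_buf_t) were 428
	 * bytes, the alignment below would be 512; a 428-byte object
	 * aligned to 512 bytes can never straddle a 4 KiB page boundary.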
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);

	/*
	 * Break the compile if size is bigger than PAGE_SIZE, since
	 * crash_notes would then definitely span 2 pages.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);


/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}

/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}