/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
        .name  = "Crash kernel low",
        .start = 0,
        .end   = 0,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
                return 1;
        return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size is given
 * by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single page
 * of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
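
/*
 * Illustrative sketch (not built; guarded out): roughly how a consumer
 * of the descriptor list described above might walk it.  The real walk
 * happens in the architecture's relocation stub, usually in assembly
 * with the MMU off or identity mapped; this C rendering only shows the
 * encoding: each entry is a page-aligned physical address with an
 * IND_* flag in the low bits.
 */
#if 0
static void example_walk_entry_list(kimage_entry_t *ptr)
{
        unsigned long dest = 0;
        kimage_entry_t entry;

        for (entry = *ptr; !(entry & IND_DONE); entry = *ptr) {
                if (entry & IND_DESTINATION) {
                        /* Set where the next source page will be copied */
                        dest = entry & PAGE_MASK;
                        ptr++;
                } else if (entry & IND_INDIRECTION) {
                        /* Continue walking in the next page of entries */
                        ptr = phys_to_virt(entry & PAGE_MASK);
                } else if (entry & IND_SOURCE) {
                        /* Copy one source page to the current target */
                        copy_page(phys_to_virt(dest),
                                  phys_to_virt(entry & PAGE_MASK));
                        dest += PAGE_SIZE;
                        ptr++;
                }
        }
}
#endif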

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
{
        size_t segment_bytes;
        struct kimage *image;
        unsigned long i;
        int result;

        /* Allocate a controlling structure */
        result = -ENOMEM;
        image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;

        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        if (result) {
                result = -EFAULT;
                goto out;
        }

        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        goto out;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        goto out;
        }

        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through, very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
                                goto out;
                }
        }

        /* Ensure our buffer sizes do not exceed
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        result = -EINVAL;
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}
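
/*
 * Worked example of the half-open overlap test used above: segments
 * [mstart, mend) and [pstart, pend) overlap iff
 * (mend > pstart) && (mstart < pend).  A hypothetical helper making the
 * predicate explicit (illustrative only, not built):
 */
#if 0
static int example_ranges_overlap(unsigned long astart, unsigned long aend,
                                  unsigned long bstart, unsigned long bend)
{
        /* [0x1000, 0x3000) and [0x2000, 0x4000) overlap;
         * [0x1000, 0x2000) and [0x2000, 0x3000) merely touch and do not.
         */
        return (aend > bstart) && (astart < bend);
}
#endif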

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        image = NULL;
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        *rimage = image;

        /*
         * Find a location for the control code buffer, and add it
         * to the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        image->swap_page = kimage_alloc_control_pages(image, 0);
        if (!image->swap_page) {
                printk(KERN_ERR "Could not allocate swap buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
{
        int result;
        struct kimage *image;
        unsigned long i;

        image = NULL;
        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;
                goto out;
        }

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        if (result)
                goto out;

        /* Enable the special crash kernel control page
         * allocation policy.
         */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;

        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of RAM.  We must ensure the addresses
         * are in the reserved area, otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
                        goto out;
        }

        /*
         * Find a location for the control code buffer, and add it
         * to the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
        }

        result = 0;
out:
        if (result == 0)
                *rimage = image;
        else
                kfree(image);

        return result;
}

static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
                        return 1;
        }

        return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                /* Stash the order so kimage_free_pages() can recover it */
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }

        return pages;
}

static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
                pfn   = page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                    kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}
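
/*
 * A minimal sketch of the alloc/free pairing above (illustrative only,
 * not built): the order is stashed in page_private() at allocation time
 * so kimage_free_pages() can free the whole block later without the
 * caller having to remember how big it was.
 */
#if 0
static void example_order_roundtrip(void)
{
        struct page *pages = kimage_alloc_pages(GFP_KERNEL, 2); /* 4 pages */

        if (pages) {
                BUG_ON(page_private(pages) != 2);
                kimage_free_pages(pages);       /* frees all 1 << 2 pages */
        }
}
#endif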
459 * 460 * Ideally I would convert multi-page allocations into single 461 * page allocations, and add everything to image->dest_pages. 462 * 463 * For now it is simpler to just free the pages. 464 */ 465 kimage_free_page_list(&extra_pages); 466 467 return pages; 468 } 469 470 static struct page *kimage_alloc_crash_control_pages(struct kimage *image, 471 unsigned int order) 472 { 473 /* Control pages are special, they are the intermediaries 474 * that are needed while we copy the rest of the pages 475 * to their final resting place. As such they must 476 * not conflict with either the destination addresses 477 * or memory the kernel is already using. 478 * 479 * Control pages are also the only pags we must allocate 480 * when loading a crash kernel. All of the other pages 481 * are specified by the segments and we just memcpy 482 * into them directly. 483 * 484 * The only case where we really need more than one of 485 * these are for architectures where we cannot disable 486 * the MMU and must instead generate an identity mapped 487 * page table for all of the memory. 488 * 489 * Given the low demand this implements a very simple 490 * allocator that finds the first hole of the appropriate 491 * size in the reserved memory region, and allocates all 492 * of the memory up to and including the hole. 493 */ 494 unsigned long hole_start, hole_end, size; 495 struct page *pages; 496 497 pages = NULL; 498 size = (1 << order) << PAGE_SHIFT; 499 hole_start = (image->control_page + (size - 1)) & ~(size - 1); 500 hole_end = hole_start + size - 1; 501 while (hole_end <= crashk_res.end) { 502 unsigned long i; 503 504 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT) 505 break; 506 /* See if I overlap any of the segments */ 507 for (i = 0; i < image->nr_segments; i++) { 508 unsigned long mstart, mend; 509 510 mstart = image->segment[i].mem; 511 mend = mstart + image->segment[i].memsz - 1; 512 if ((hole_end >= mstart) && (hole_start <= mend)) { 513 /* Advance the hole to the end of the segment */ 514 hole_start = (mend + (size - 1)) & ~(size - 1); 515 hole_end = hole_start + size - 1; 516 break; 517 } 518 } 519 /* If I don't overlap any segments I have found my hole! 

struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}

static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        int result;

        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
        if (result == 0)
                image->destination = destination;

        return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);
        if (result == 0)
                image->destination += PAGE_SIZE;

        return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
             ptr = (entry & IND_INDIRECTION) ? \
                   phys_to_virt(entry & PAGE_MASK) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
        kfree(image);
}
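
/*
 * Sketch of the entry encoding relied on above (illustrative only, not
 * built): every address placed in the list is page aligned, so the low
 * bits are free to carry the IND_* type flag, and PAGE_MASK recovers
 * the address.
 */
#if 0
static void example_entry_encoding(void)
{
        unsigned long addr = 0x12345000UL;      /* page aligned */
        kimage_entry_t entry = addr | IND_SOURCE;

        BUG_ON((entry & PAGE_MASK) != addr);    /* the address survives */
        BUG_ON(!(entry & IND_SOURCE));          /* and so does the flag */
}
#endif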

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
         * be fixed.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used, file it away */
                if (page_to_pfn(page) >
                    (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page, use it */
                if (!kimage_is_destination_range(image, addr,
                                                 addr + PAGE_SIZE))
                        break;

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it if its
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);
                                continue;
                        }
                        addr = old_addr;
                        page = old_page;
                        break;
                } else {
                        /* Place the page on the destination list; I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
                }
        }

        return page;
}
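
/*
 * Example of the swap above: suppose old_page was handed out earlier as
 * the source for destination D, and the fresh allocation happens to sit
 * at physical address D itself.  The loop copies old_page's contents
 * into the fresh page (which is already in its final resting place),
 * rewrites the list entry for D to point at the fresh page, and returns
 * old_page to the caller instead.  The invariant that a source page is
 * either its own destination or not a destination at all is preserved.
 */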

static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_pfn(page)
                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap(page);
                /* Start with a clear page */
                clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes)
                        uchunk = ubytes;

                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}

static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long maddr;
        unsigned long ubytes, mbytes;
        int result;
        unsigned char __user *buf;

        result = 0;
        buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
                        mchunk = mbytes;

                uchunk = mchunk;
                if (uchunk > ubytes) {
                        uchunk = ubytes;
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                kunmap(page);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
out:
        return result;
}

static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
                break;
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
                break;
        }

        return result;
}
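
/*
 * Worked example of the chunking in the two loaders above, assuming
 * PAGE_SIZE == 0x1000: with maddr = 0x1ff0, mbytes = 0x30 and
 * ubytes = 0x18, the first pass copies mchunk = uchunk = 0x10 bytes up
 * to the page boundary.  The second pass has mchunk = 0x20 but only
 * ubytes = 0x8 of user data left, so uchunk = 0x8; the crash loader
 * zeroes the remaining 0x18 bytes of the chunk, while the normal
 * loader started from a cleared page so the tail is already zero.
 */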

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing ongoing DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and copies the image to its final destination, then
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
{
        struct kimage **dest_image, *image;
        int result;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /*
         * Verify we have a legal set of flags.
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
            ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
                return -EINVAL;

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        image = NULL;
        result = 0;

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                unsigned long i;

                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                     nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                    nr_segments, segments);
                        crash_map_reserved_pages();
                }
                if (result)
                        goto out;

                if (flags & KEXEC_PRESERVE_CONTEXT)
                        image->preserve_context = 1;
                result = machine_kexec_prepare(image);
                if (result)
                        goto out;

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
                        if (result)
                                goto out;
                }
                kimage_terminate(image);
                if (flags & KEXEC_ON_CRASH)
                        crash_unmap_reserved_pages();
        }
        /* Install the new kernel and uninstall the old */
        image = xchg(dest_image, image);

out:
        mutex_unlock(&kexec_mutex);
        kimage_free(image);

        return result;
}
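
/*
 * A minimal user-space sketch of driving this syscall (illustrative
 * only, not built here; in practice the kexec-tools package does this
 * work, and the buffer contents, addresses and sizes below are
 * placeholder assumptions).
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

static long example_load(const void *kernel_buf, size_t kernel_len,
                         unsigned long dest, unsigned long entry)
{
        struct kexec_segment seg = {
                .buf   = kernel_buf,            /* user-space source */
                .bufsz = kernel_len,
                .mem   = (const void *)dest,    /* page-aligned target */
                .memsz = (kernel_len + 0xfff) & ~0xfffUL, /* assumes 4K pages */
        };

        /* One segment, native architecture, ordinary (non-crash) load */
        return syscall(SYS_kexec_load, entry, 1UL, &seg,
                       (unsigned long)KEXEC_ARCH_DEFAULT);
}
#endif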

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                      unsigned long nr_segments,
                                      struct compat_kexec_segment __user *segments,
                                      unsigned long flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                return -EINVAL;

        if (nr_segments > KEXEC_SEGMENT_MAX)
                return -EINVAL;

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                if (result)
                        return -EFAULT;

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.mem   = in.mem;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
                if (result)
                        return -EFAULT;
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                mutex_unlock(&kexec_mutex);
        }
}

size_t crash_get_memory_size(void)
{
        size_t size = 0;

        mutex_lock(&kexec_mutex);
        if (crashk_res.end != crashk_res.start)
                size = resource_size(&crashk_res);
        mutex_unlock(&kexec_mutex);
        return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
                                           unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
                init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
                free_page((unsigned long)__va(addr));
                totalram_pages++;
        }
}

int crash_shrink_memory(unsigned long new_size)
{
        int ret = 0;
        unsigned long start, end;
        unsigned long old_size;
        struct resource *ram_res;

        mutex_lock(&kexec_mutex);

        if (kexec_crash_image) {
                ret = -ENOENT;
                goto unlock;
        }
        start = crashk_res.start;
        end = crashk_res.end;
        old_size = (end == 0) ? 0 : end - start + 1;
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;
                goto unlock;
        }

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
        if (!ram_res) {
                ret = -ENOMEM;
                goto unlock;
        }

        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

        crash_map_reserved_pages();
        crash_free_reserved_phys_range(end, crashk_res.end);

        if ((start == end) && (crashk_res.parent != NULL))
                release_resource(&crashk_res);

        ram_res->start = end;
        ram_res->end = crashk_res.end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        ram_res->name = "System RAM";

        crashk_res.end = end - 1;

        insert_resource(&iomem_resource, ram_res);
        crash_unmap_reserved_pages();

unlock:
        mutex_unlock(&kexec_mutex);
        return ret;
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
{
        struct elf_note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        note.n_type   = type;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3)/4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3)/4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3)/4;

        return buf;
}

static void final_note(u32 *buf)
{
        struct elf_note note;

        note.n_namesz = 0;
        note.n_descsz = 0;
        note.n_type   = 0;
        memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;

        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away.  ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
}
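
/*
 * Layout produced by append_elf_note() above, as a worked example: for
 * name "CORE" (n_namesz = 5, including the NUL) and an 8-byte
 * descriptor, the buffer holds the 12-byte header, then the name padded
 * to a 4-byte boundary, then the data, also padded:
 *
 *   | n_namesz=5 | n_descsz=8 | n_type | "CORE\0" + 3 pad | 8 data bytes |
 *
 * The (x + 3)/4 expressions advance the u32 cursor past each
 * 4-byte-aligned piece.
 */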

static int __init crash_notes_memory_init(void)
{
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
                printk(KERN_ERR "Kexec: Memory allocation for saving cpu register"
                       " states failed\n");
                return -ENOMEM;
        }
        return 0;
}
module_init(crash_notes_memory_init)


/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *   crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char *cmdline,
                                        unsigned long long system_ram,
                                        unsigned long long *crash_size,
                                        unsigned long long *crash_base)
{
        char *cur = cmdline, *tmp;

        /* for each entry of the comma-separated list */
        do {
                unsigned long long start, end = ULLONG_MAX, size;

                /* get the start of the range */
                start = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warning("crashkernel: Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (*cur != '-') {
                        pr_warning("crashkernel: '-' expected\n");
                        return -EINVAL;
                }
                cur++;

                /* if no ':' is here, then we read the end of the range */
                if (*cur != ':') {
                        end = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warning("crashkernel: Memory "
                                           "value expected\n");
                                return -EINVAL;
                        }
                        cur = tmp;
                        if (end <= start) {
                                pr_warning("crashkernel: end <= start\n");
                                return -EINVAL;
                        }
                }

                if (*cur != ':') {
                        pr_warning("crashkernel: ':' expected\n");
                        return -EINVAL;
                }
                cur++;

                size = memparse(cur, &tmp);
                if (cur == tmp) {
                        pr_warning("Memory value expected\n");
                        return -EINVAL;
                }
                cur = tmp;
                if (size >= system_ram) {
                        pr_warning("crashkernel: invalid size\n");
                        return -EINVAL;
                }

                /* match ? */
                if (system_ram >= start && system_ram < end) {
                        *crash_size = size;
                        break;
                }
        } while (*cur++ == ',');

        if (*crash_size > 0) {
                while (*cur && *cur != ' ' && *cur != '@')
                        cur++;
                if (*cur == '@') {
                        cur++;
                        *crash_base = memparse(cur, &tmp);
                        if (cur == tmp) {
                                pr_warning("Memory value expected "
                                           "after '@'\n");
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
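
/*
 * Example of the extended syntax handled above:
 *
 *   crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when system RAM is in [512M, 2G), 128M when it is 2G or
 * more, and nothing below 512M; when a range matches, the optional
 * @16M places the reservation at physical offset 16M.
 */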
1349 */ 1350 static int __init parse_crashkernel_simple(char *cmdline, 1351 unsigned long long *crash_size, 1352 unsigned long long *crash_base) 1353 { 1354 char *cur = cmdline; 1355 1356 *crash_size = memparse(cmdline, &cur); 1357 if (cmdline == cur) { 1358 pr_warning("crashkernel: memory value expected\n"); 1359 return -EINVAL; 1360 } 1361 1362 if (*cur == '@') 1363 *crash_base = memparse(cur+1, &cur); 1364 else if (*cur != ' ' && *cur != '\0') { 1365 pr_warning("crashkernel: unrecognized char\n"); 1366 return -EINVAL; 1367 } 1368 1369 return 0; 1370 } 1371 1372 /* 1373 * That function is the entry point for command line parsing and should be 1374 * called from the arch-specific code. 1375 */ 1376 static int __init __parse_crashkernel(char *cmdline, 1377 unsigned long long system_ram, 1378 unsigned long long *crash_size, 1379 unsigned long long *crash_base, 1380 const char *name) 1381 { 1382 char *p = cmdline, *ck_cmdline = NULL; 1383 char *first_colon, *first_space; 1384 1385 BUG_ON(!crash_size || !crash_base); 1386 *crash_size = 0; 1387 *crash_base = 0; 1388 1389 /* find crashkernel and use the last one if there are more */ 1390 p = strstr(p, name); 1391 while (p) { 1392 ck_cmdline = p; 1393 p = strstr(p+1, name); 1394 } 1395 1396 if (!ck_cmdline) 1397 return -EINVAL; 1398 1399 ck_cmdline += strlen(name); 1400 1401 /* 1402 * if the commandline contains a ':', then that's the extended 1403 * syntax -- if not, it must be the classic syntax 1404 */ 1405 first_colon = strchr(ck_cmdline, ':'); 1406 first_space = strchr(ck_cmdline, ' '); 1407 if (first_colon && (!first_space || first_colon < first_space)) 1408 return parse_crashkernel_mem(ck_cmdline, system_ram, 1409 crash_size, crash_base); 1410 else 1411 return parse_crashkernel_simple(ck_cmdline, crash_size, 1412 crash_base); 1413 1414 return 0; 1415 } 1416 1417 int __init parse_crashkernel(char *cmdline, 1418 unsigned long long system_ram, 1419 unsigned long long *crash_size, 1420 unsigned long long *crash_base) 1421 { 1422 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, 1423 "crashkernel="); 1424 } 1425 1426 int __init parse_crashkernel_low(char *cmdline, 1427 unsigned long long system_ram, 1428 unsigned long long *crash_size, 1429 unsigned long long *crash_base) 1430 { 1431 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, 1432 "crashkernel_low="); 1433 } 1434 1435 static void update_vmcoreinfo_note(void) 1436 { 1437 u32 *buf = vmcoreinfo_note; 1438 1439 if (!vmcoreinfo_size) 1440 return; 1441 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, 1442 vmcoreinfo_size); 1443 final_note(buf); 1444 } 1445 1446 void crash_save_vmcoreinfo(void) 1447 { 1448 vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); 1449 update_vmcoreinfo_note(); 1450 } 1451 1452 void vmcoreinfo_append_str(const char *fmt, ...) 
1453 { 1454 va_list args; 1455 char buf[0x50]; 1456 int r; 1457 1458 va_start(args, fmt); 1459 r = vsnprintf(buf, sizeof(buf), fmt, args); 1460 va_end(args); 1461 1462 if (r + vmcoreinfo_size > vmcoreinfo_max_size) 1463 r = vmcoreinfo_max_size - vmcoreinfo_size; 1464 1465 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); 1466 1467 vmcoreinfo_size += r; 1468 } 1469 1470 /* 1471 * provide an empty default implementation here -- architecture 1472 * code may override this 1473 */ 1474 void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void) 1475 {} 1476 1477 unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void) 1478 { 1479 return __pa((unsigned long)(char *)&vmcoreinfo_note); 1480 } 1481 1482 static int __init crash_save_vmcoreinfo_init(void) 1483 { 1484 VMCOREINFO_OSRELEASE(init_uts_ns.name.release); 1485 VMCOREINFO_PAGESIZE(PAGE_SIZE); 1486 1487 VMCOREINFO_SYMBOL(init_uts_ns); 1488 VMCOREINFO_SYMBOL(node_online_map); 1489 #ifdef CONFIG_MMU 1490 VMCOREINFO_SYMBOL(swapper_pg_dir); 1491 #endif 1492 VMCOREINFO_SYMBOL(_stext); 1493 VMCOREINFO_SYMBOL(vmlist); 1494 1495 #ifndef CONFIG_NEED_MULTIPLE_NODES 1496 VMCOREINFO_SYMBOL(mem_map); 1497 VMCOREINFO_SYMBOL(contig_page_data); 1498 #endif 1499 #ifdef CONFIG_SPARSEMEM 1500 VMCOREINFO_SYMBOL(mem_section); 1501 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); 1502 VMCOREINFO_STRUCT_SIZE(mem_section); 1503 VMCOREINFO_OFFSET(mem_section, section_mem_map); 1504 #endif 1505 VMCOREINFO_STRUCT_SIZE(page); 1506 VMCOREINFO_STRUCT_SIZE(pglist_data); 1507 VMCOREINFO_STRUCT_SIZE(zone); 1508 VMCOREINFO_STRUCT_SIZE(free_area); 1509 VMCOREINFO_STRUCT_SIZE(list_head); 1510 VMCOREINFO_SIZE(nodemask_t); 1511 VMCOREINFO_OFFSET(page, flags); 1512 VMCOREINFO_OFFSET(page, _count); 1513 VMCOREINFO_OFFSET(page, mapping); 1514 VMCOREINFO_OFFSET(page, lru); 1515 VMCOREINFO_OFFSET(page, _mapcount); 1516 VMCOREINFO_OFFSET(page, private); 1517 VMCOREINFO_OFFSET(pglist_data, node_zones); 1518 VMCOREINFO_OFFSET(pglist_data, nr_zones); 1519 #ifdef CONFIG_FLAT_NODE_MEM_MAP 1520 VMCOREINFO_OFFSET(pglist_data, node_mem_map); 1521 #endif 1522 VMCOREINFO_OFFSET(pglist_data, node_start_pfn); 1523 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages); 1524 VMCOREINFO_OFFSET(pglist_data, node_id); 1525 VMCOREINFO_OFFSET(zone, free_area); 1526 VMCOREINFO_OFFSET(zone, vm_stat); 1527 VMCOREINFO_OFFSET(zone, spanned_pages); 1528 VMCOREINFO_OFFSET(free_area, free_list); 1529 VMCOREINFO_OFFSET(list_head, next); 1530 VMCOREINFO_OFFSET(list_head, prev); 1531 VMCOREINFO_OFFSET(vm_struct, addr); 1532 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); 1533 log_buf_kexec_setup(); 1534 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); 1535 VMCOREINFO_NUMBER(NR_FREE_PAGES); 1536 VMCOREINFO_NUMBER(PG_lru); 1537 VMCOREINFO_NUMBER(PG_private); 1538 VMCOREINFO_NUMBER(PG_swapcache); 1539 VMCOREINFO_NUMBER(PG_slab); 1540 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); 1541 1542 arch_crash_save_vmcoreinfo(); 1543 update_vmcoreinfo_note(); 1544 1545 return 0; 1546 } 1547 1548 module_init(crash_save_vmcoreinfo_init) 1549 1550 /* 1551 * Move into place and start executing a preloaded standalone 1552 * executable. If nothing was preloaded return an error. 

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
        int error = 0;

        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
                goto Unlock;
        }

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
                        error = -EBUSY;
                        goto Restore_console;
                }
                suspend_console();
                error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_console;
                /* At this point, dpm_suspend_start() has been called,
                 * but *not* dpm_suspend_end().  We *must* call
                 * dpm_suspend_end() now.  Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                error = disable_nonboot_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
                error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
#endif
        {
                kernel_restart_prepare(NULL);
                printk(KERN_EMERG "Starting new kernel\n");
                machine_shutdown();
        }

        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                syscore_resume();
 Enable_irqs:
                local_irq_enable();
 Enable_cpus:
                enable_nonboot_cpus();
                dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
 Resume_console:
                resume_console();
                thaw_processes();
 Restore_console:
                pm_restore_console();
                unlock_system_sleep();
        }
#endif

 Unlock:
        mutex_unlock(&kexec_mutex);
        return error;
}