xref: /openbmc/linux/kernel/kexec_core.c (revision 8730046c)
1 /*
2  * kexec.c - kexec system call core code.
3  * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
4  *
5  * This source code is licensed under the GNU General Public License,
6  * Version 2.  See the file COPYING for more details.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/capability.h>
12 #include <linux/mm.h>
13 #include <linux/file.h>
14 #include <linux/slab.h>
15 #include <linux/fs.h>
16 #include <linux/kexec.h>
17 #include <linux/mutex.h>
18 #include <linux/list.h>
19 #include <linux/highmem.h>
20 #include <linux/syscalls.h>
21 #include <linux/reboot.h>
22 #include <linux/ioport.h>
23 #include <linux/hardirq.h>
24 #include <linux/elf.h>
25 #include <linux/elfcore.h>
26 #include <linux/utsname.h>
27 #include <linux/numa.h>
28 #include <linux/suspend.h>
29 #include <linux/device.h>
30 #include <linux/freezer.h>
31 #include <linux/pm.h>
32 #include <linux/cpu.h>
33 #include <linux/uaccess.h>
34 #include <linux/io.h>
35 #include <linux/console.h>
36 #include <linux/vmalloc.h>
37 #include <linux/swap.h>
38 #include <linux/syscore_ops.h>
39 #include <linux/compiler.h>
40 #include <linux/hugetlb.h>
41 
42 #include <asm/page.h>
43 #include <asm/sections.h>
44 
45 #include <crypto/hash.h>
46 #include <crypto/sha.h>
47 #include "kexec_internal.h"
48 
49 DEFINE_MUTEX(kexec_mutex);
50 
51 /* Per cpu memory for storing cpu states in case of system crash. */
52 note_buf_t __percpu *crash_notes;
53 
54 /* vmcoreinfo stuff */
55 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
56 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
57 size_t vmcoreinfo_size;
58 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
59 
60 /* Flag to indicate we are going to kexec a new kernel */
61 bool kexec_in_progress = false;
62 
63 
64 /* Location of the reserved area for the crash kernel */
65 struct resource crashk_res = {
66 	.name  = "Crash kernel",
67 	.start = 0,
68 	.end   = 0,
69 	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
70 	.desc  = IORES_DESC_CRASH_KERNEL
71 };
72 struct resource crashk_low_res = {
73 	.name  = "Crash kernel",
74 	.start = 0,
75 	.end   = 0,
76 	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
77 	.desc  = IORES_DESC_CRASH_KERNEL
78 };
79 
80 int kexec_should_crash(struct task_struct *p)
81 {
82 	/*
83 	 * If crash_kexec_post_notifiers is enabled, don't run
84 	 * crash_kexec() here yet, which must be run after panic
85 	 * notifiers in panic().
86 	 */
87 	if (crash_kexec_post_notifiers)
88 		return 0;
89 	/*
90 	 * There are 4 panic() calls in the do_exit() path, each of which
91 	 * corresponds to one of these 4 conditions.
92 	 */
93 	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
94 		return 1;
95 	return 0;
96 }
97 
98 int kexec_crash_loaded(void)
99 {
100 	return !!kexec_crash_image;
101 }
102 EXPORT_SYMBOL_GPL(kexec_crash_loaded);
103 
104 /*
105  * When kexec transitions to the new kernel there is a one-to-one
106  * mapping between physical and virtual addresses.  On processors
107  * where you can disable the MMU this is trivial and easy.  For
108  * others it is still a simple predictable page table to setup.
109  *
110  * In that environment kexec copies the new kernel to its final
111  * resting place.  This means I can only support memory whose
112  * physical address can fit in an unsigned long.  In particular
113  * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
114  * If the assembly stub has more restrictive requirements
115  * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
116  * defined more restrictively in <asm/kexec.h>.
117  *
118  * The code for the transition from the current kernel to the
119  * new kernel is placed in the control_code_buffer, whose size
120  * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
121  * page of memory is necessary, but some architectures require more.
122  * Because this memory must be identity mapped in the transition from
123  * virtual to physical addresses it must live in the range
124  * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
125  * modifiable.
126  *
127  * The assembly stub in the control code buffer is passed a linked list
128  * of descriptor pages detailing the source pages of the new kernel,
129  * and the destination addresses of those source pages.  As this data
130  * structure is not used in the context of the current OS, it must
131  * be self-contained.
132  *
133  * The code has been made to work with highmem pages and will use a
134  * destination page in its final resting place (if it happens
135  * to allocate it).  The end product of this is that most of the
136  * physical address space, and most of RAM can be used.
137  *
138  * Future directions include:
139  *  - allocating a page table with the control code buffer identity
140  *    mapped, to simplify machine_kexec and make kexec_on_panic more
141  *    reliable.
142  */
143 
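/*
 * For illustration, the entry list built by the kimage_add_*() helpers
 * below and handed to the architecture's relocation stub is a flat
 * sequence of kimage_entry_t values along these lines:
 *
 *   dest | IND_DESTINATION    set the current destination address
 *   src  | IND_SOURCE         copy this source page to the current
 *                             destination, then advance it by PAGE_SIZE
 *   next | IND_INDIRECTION    continue reading entries from another page
 *   IND_DONE                  end of the list
 *
 * This is only a sketch; the authoritative flag definitions live in
 * <linux/kexec.h>.
 */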
144 /*
145  * KIMAGE_NO_DEST is an impossible destination address, used when
146  * allocating pages whose destination address we do not care about.
147  */
148 #define KIMAGE_NO_DEST (-1UL)
149 #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
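/*
 * For example: PAGE_COUNT(0) == 0, PAGE_COUNT(1) == 1 and
 * PAGE_COUNT(PAGE_SIZE + 1) == 2, i.e. byte counts are rounded up to
 * whole pages.
 */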
150 
151 static struct page *kimage_alloc_page(struct kimage *image,
152 				       gfp_t gfp_mask,
153 				       unsigned long dest);
154 
155 int sanity_check_segment_list(struct kimage *image)
156 {
157 	int i;
158 	unsigned long nr_segments = image->nr_segments;
159 	unsigned long total_pages = 0;
160 
161 	/*
162 	 * Verify we have good destination addresses.  The caller is
163 	 * responsible for making certain we don't attempt to load
164 	 * the new image into invalid or reserved areas of RAM.  This
165 	 * just verifies it is an address we can use.
166 	 *
167 	 * Since the kernel does everything in page size chunks, ensure
168 	 * the destination addresses are page aligned.  Too many
169 	 * special cases crop up when we don't do this.  The most
170 	 * insidious is getting overlapping destination addresses
171 	 * simply because addresses are changed to page size
172 	 * granularity.
173 	 */
174 	for (i = 0; i < nr_segments; i++) {
175 		unsigned long mstart, mend;
176 
177 		mstart = image->segment[i].mem;
178 		mend   = mstart + image->segment[i].memsz;
179 		if (mstart > mend)
180 			return -EADDRNOTAVAIL;
181 		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
182 			return -EADDRNOTAVAIL;
183 		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
184 			return -EADDRNOTAVAIL;
185 	}
186 
187 	/* Verify our destination addresses do not overlap.
188 	 * If we allowed overlapping destination addresses
189 	 * through, very weird things can happen with no
190 	 * easy explanation as one segment stomps on another.
191 	 */
192 	for (i = 0; i < nr_segments; i++) {
193 		unsigned long mstart, mend;
194 		unsigned long j;
195 
196 		mstart = image->segment[i].mem;
197 		mend   = mstart + image->segment[i].memsz;
198 		for (j = 0; j < i; j++) {
199 			unsigned long pstart, pend;
200 
201 			pstart = image->segment[j].mem;
202 			pend   = pstart + image->segment[j].memsz;
203 			/* Do the segments overlap ? */
204 			if ((mend > pstart) && (mstart < pend))
205 				return -EINVAL;
206 		}
207 	}
208 
209 	/* Ensure our buffer sizes are strictly less than
210 	 * our memory sizes.  This should always be the case,
211 	 * and it is easier to check up front than to be surprised
212 	 * later on.
213 	 */
214 	for (i = 0; i < nr_segments; i++) {
215 		if (image->segment[i].bufsz > image->segment[i].memsz)
216 			return -EINVAL;
217 	}
218 
219 	/*
220 	 * Verify that no more than half of memory will be consumed. If the
221 	 * request from userspace is too large, a large amount of time will be
222 	 * wasted allocating pages, which can cause a soft lockup.
223 	 */
224 	for (i = 0; i < nr_segments; i++) {
225 		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
226 			return -EINVAL;
227 
228 		total_pages += PAGE_COUNT(image->segment[i].memsz);
229 	}
230 
231 	if (total_pages > totalram_pages / 2)
232 		return -EINVAL;
233 
234 	/*
235 	 * Verify we have good destination addresses.  Normally
236 	 * the caller is responsible for making certain we don't
237 	 * attempt to load the new image into invalid or reserved
238 	 * areas of RAM.  But crash kernels are preloaded into a
239 	 * reserved area of RAM.  We must ensure the addresses
240 	 * are in the reserved area, otherwise preloading the
241 	 * kernel could corrupt things.
242 	 */
243 
244 	if (image->type == KEXEC_TYPE_CRASH) {
245 		for (i = 0; i < nr_segments; i++) {
246 			unsigned long mstart, mend;
247 
248 			mstart = image->segment[i].mem;
249 			mend = mstart + image->segment[i].memsz - 1;
250 			/* Ensure we are within the crash kernel limits */
251 			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
252 			    (mend > phys_to_boot_phys(crashk_res.end)))
253 				return -EADDRNOTAVAIL;
254 		}
255 	}
256 
257 	return 0;
258 }
259 
260 struct kimage *do_kimage_alloc_init(void)
261 {
262 	struct kimage *image;
263 
264 	/* Allocate a controlling structure */
265 	image = kzalloc(sizeof(*image), GFP_KERNEL);
266 	if (!image)
267 		return NULL;
268 
269 	image->head = 0;
270 	image->entry = &image->head;
271 	image->last_entry = &image->head;
272 	image->control_page = ~0; /* By default this does not apply */
273 	image->type = KEXEC_TYPE_DEFAULT;
274 
275 	/* Initialize the list of control pages */
276 	INIT_LIST_HEAD(&image->control_pages);
277 
278 	/* Initialize the list of destination pages */
279 	INIT_LIST_HEAD(&image->dest_pages);
280 
281 	/* Initialize the list of unusable pages */
282 	INIT_LIST_HEAD(&image->unusable_pages);
283 
284 	return image;
285 }
286 
287 int kimage_is_destination_range(struct kimage *image,
288 					unsigned long start,
289 					unsigned long end)
290 {
291 	unsigned long i;
292 
293 	for (i = 0; i < image->nr_segments; i++) {
294 		unsigned long mstart, mend;
295 
296 		mstart = image->segment[i].mem;
297 		mend = mstart + image->segment[i].memsz;
298 		if ((end > mstart) && (start < mend))
299 			return 1;
300 	}
301 
302 	return 0;
303 }
304 
305 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
306 {
307 	struct page *pages;
308 
309 	pages = alloc_pages(gfp_mask, order);
310 	if (pages) {
311 		unsigned int count, i;
312 
313 		pages->mapping = NULL;
314 		set_page_private(pages, order);
315 		count = 1 << order;
316 		for (i = 0; i < count; i++)
317 			SetPageReserved(pages + i);
318 	}
319 
320 	return pages;
321 }
322 
323 static void kimage_free_pages(struct page *page)
324 {
325 	unsigned int order, count, i;
326 
327 	order = page_private(page);
328 	count = 1 << order;
329 	for (i = 0; i < count; i++)
330 		ClearPageReserved(page + i);
331 	__free_pages(page, order);
332 }
333 
334 void kimage_free_page_list(struct list_head *list)
335 {
336 	struct page *page, *next;
337 
338 	list_for_each_entry_safe(page, next, list, lru) {
339 		list_del(&page->lru);
340 		kimage_free_pages(page);
341 	}
342 }
343 
344 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
345 							unsigned int order)
346 {
347 	/* Control pages are special, they are the intermediaries
348 	 * that are needed while we copy the rest of the pages
349 	 * to their final resting place.  As such they must
350 	 * not conflict with either the destination addresses
351 	 * or memory the kernel is already using.
352 	 *
353 	 * The only case where we really need more than one of
354 	 * these is for architectures where we cannot disable
355 	 * the MMU and must instead generate an identity mapped
356 	 * page table for all of the memory.
357 	 *
358 	 * At worst this runs in O(N) of the image size.
359 	 */
360 	struct list_head extra_pages;
361 	struct page *pages;
362 	unsigned int count;
363 
364 	count = 1 << order;
365 	INIT_LIST_HEAD(&extra_pages);
366 
367 	/* Loop while I can allocate a page and the page allocated
368 	 * is a destination page.
369 	 */
370 	do {
371 		unsigned long pfn, epfn, addr, eaddr;
372 
373 		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
374 		if (!pages)
375 			break;
376 		pfn   = page_to_boot_pfn(pages);
377 		epfn  = pfn + count;
378 		addr  = pfn << PAGE_SHIFT;
379 		eaddr = epfn << PAGE_SHIFT;
380 		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
381 			      kimage_is_destination_range(image, addr, eaddr)) {
382 			list_add(&pages->lru, &extra_pages);
383 			pages = NULL;
384 		}
385 	} while (!pages);
386 
387 	if (pages) {
388 		/* Remember the allocated page... */
389 		list_add(&pages->lru, &image->control_pages);
390 
391 		/* Because the page is already in its destination
392 		 * location we will never allocate another page at
393 		 * that address.  Therefore kimage_alloc_pages
394 		 * will not return it (again) and we don't need
395 		 * to give it an entry in image->segment[].
396 		 */
397 	}
398 	/* Deal with the destination pages I have inadvertently allocated.
399 	 *
400 	 * Ideally I would convert multi-page allocations into single
401 	 * page allocations, and add everything to image->dest_pages.
402 	 *
403 	 * For now it is simpler to just free the pages.
404 	 */
405 	kimage_free_page_list(&extra_pages);
406 
407 	return pages;
408 }
409 
410 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
411 						      unsigned int order)
412 {
413 	/* Control pages are special, they are the intermediaries
414 	 * that are needed while we copy the rest of the pages
415 	 * to their final resting place.  As such they must
416 	 * not conflict with either the destination addresses
417 	 * or memory the kernel is already using.
418 	 *
419 	 * Control pages are also the only pages we must allocate
420 	 * when loading a crash kernel.  All of the other pages
421 	 * are specified by the segments and we just memcpy
422 	 * into them directly.
423 	 *
424 	 * The only case where we really need more than one of
425 	 * these is for architectures where we cannot disable
426 	 * the MMU and must instead generate an identity mapped
427 	 * page table for all of the memory.
428 	 *
429 	 * Given the low demand this implements a very simple
430 	 * allocator that finds the first hole of the appropriate
431 	 * size in the reserved memory region, and allocates all
432 	 * of the memory up to and including the hole.
433 	 */
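	/*
	 * Illustrative arithmetic for the search below (assuming 4 KiB pages
	 * and order 1): size is 8 KiB, hole_start is image->control_page
	 * rounded up to an 8 KiB boundary, and hole_end is the last byte of
	 * that naturally aligned 8 KiB block.
	 */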
434 	unsigned long hole_start, hole_end, size;
435 	struct page *pages;
436 
437 	pages = NULL;
438 	size = (1 << order) << PAGE_SHIFT;
439 	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
440 	hole_end   = hole_start + size - 1;
441 	while (hole_end <= crashk_res.end) {
442 		unsigned long i;
443 
444 		cond_resched();
445 
446 		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
447 			break;
448 		/* See if I overlap any of the segments */
449 		for (i = 0; i < image->nr_segments; i++) {
450 			unsigned long mstart, mend;
451 
452 			mstart = image->segment[i].mem;
453 			mend   = mstart + image->segment[i].memsz - 1;
454 			if ((hole_end >= mstart) && (hole_start <= mend)) {
455 				/* Advance the hole to the end of the segment */
456 				hole_start = (mend + (size - 1)) & ~(size - 1);
457 				hole_end   = hole_start + size - 1;
458 				break;
459 			}
460 		}
461 		/* If I don't overlap any segments I have found my hole! */
462 		if (i == image->nr_segments) {
463 			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
464 			image->control_page = hole_end;
465 			break;
466 		}
467 	}
468 
469 	return pages;
470 }
471 
472 
473 struct page *kimage_alloc_control_pages(struct kimage *image,
474 					 unsigned int order)
475 {
476 	struct page *pages = NULL;
477 
478 	switch (image->type) {
479 	case KEXEC_TYPE_DEFAULT:
480 		pages = kimage_alloc_normal_control_pages(image, order);
481 		break;
482 	case KEXEC_TYPE_CRASH:
483 		pages = kimage_alloc_crash_control_pages(image, order);
484 		break;
485 	}
486 
487 	return pages;
488 }
489 
490 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
491 {
492 	if (*image->entry != 0)
493 		image->entry++;
494 
495 	if (image->entry == image->last_entry) {
496 		kimage_entry_t *ind_page;
497 		struct page *page;
498 
499 		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
500 		if (!page)
501 			return -ENOMEM;
502 
503 		ind_page = page_address(page);
504 		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
505 		image->entry = ind_page;
506 		image->last_entry = ind_page +
507 				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
508 	}
509 	*image->entry = entry;
510 	image->entry++;
511 	*image->entry = 0;
512 
513 	return 0;
514 }
515 
516 static int kimage_set_destination(struct kimage *image,
517 				   unsigned long destination)
518 {
519 	int result;
520 
521 	destination &= PAGE_MASK;
522 	result = kimage_add_entry(image, destination | IND_DESTINATION);
523 
524 	return result;
525 }
526 
527 
528 static int kimage_add_page(struct kimage *image, unsigned long page)
529 {
530 	int result;
531 
532 	page &= PAGE_MASK;
533 	result = kimage_add_entry(image, page | IND_SOURCE);
534 
535 	return result;
536 }
537 
538 
539 static void kimage_free_extra_pages(struct kimage *image)
540 {
541 	/* Walk through and free any extra destination pages I may have */
542 	kimage_free_page_list(&image->dest_pages);
543 
544 	/* Walk through and free any unusable pages I have cached */
545 	kimage_free_page_list(&image->unusable_pages);
546 
547 }
548 void kimage_terminate(struct kimage *image)
549 {
550 	if (*image->entry != 0)
551 		image->entry++;
552 
553 	*image->entry = IND_DONE;
554 }
555 
556 #define for_each_kimage_entry(image, ptr, entry) \
557 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
558 		ptr = (entry & IND_INDIRECTION) ? \
559 			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
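/*
 * Minimal usage sketch (illustrative only) -- walk every source page of a
 * loaded image, transparently following indirection pages until IND_DONE:
 *
 *	kimage_entry_t *ptr, entry;
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			handle_source_page(entry & PAGE_MASK);
 *
 * handle_source_page() is a placeholder, not a real helper.
 */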
560 
561 static void kimage_free_entry(kimage_entry_t entry)
562 {
563 	struct page *page;
564 
565 	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
566 	kimage_free_pages(page);
567 }
568 
569 void kimage_free(struct kimage *image)
570 {
571 	kimage_entry_t *ptr, entry;
572 	kimage_entry_t ind = 0;
573 
574 	if (!image)
575 		return;
576 
577 	kimage_free_extra_pages(image);
578 	for_each_kimage_entry(image, ptr, entry) {
579 		if (entry & IND_INDIRECTION) {
580 			/* Free the previous indirection page */
581 			if (ind & IND_INDIRECTION)
582 				kimage_free_entry(ind);
583 			/* Save this indirection page until we are
584 			 * done with it.
585 			 */
586 			ind = entry;
587 		} else if (entry & IND_SOURCE)
588 			kimage_free_entry(entry);
589 	}
590 	/* Free the final indirection page */
591 	if (ind & IND_INDIRECTION)
592 		kimage_free_entry(ind);
593 
594 	/* Handle any machine specific cleanup */
595 	machine_kexec_cleanup(image);
596 
597 	/* Free the kexec control pages... */
598 	kimage_free_page_list(&image->control_pages);
599 
600 	/*
601 	 * Free up any temporary buffers allocated. This path might be hit if an
602 	 * error occurred long after buffer allocation.
603 	 */
604 	if (image->file_mode)
605 		kimage_file_post_load_cleanup(image);
606 
607 	kfree(image);
608 }
609 
610 static kimage_entry_t *kimage_dst_used(struct kimage *image,
611 					unsigned long page)
612 {
613 	kimage_entry_t *ptr, entry;
614 	unsigned long destination = 0;
615 
616 	for_each_kimage_entry(image, ptr, entry) {
617 		if (entry & IND_DESTINATION)
618 			destination = entry & PAGE_MASK;
619 		else if (entry & IND_SOURCE) {
620 			if (page == destination)
621 				return ptr;
622 			destination += PAGE_SIZE;
623 		}
624 	}
625 
626 	return NULL;
627 }
628 
629 static struct page *kimage_alloc_page(struct kimage *image,
630 					gfp_t gfp_mask,
631 					unsigned long destination)
632 {
633 	/*
634 	 * Here we implement safeguards to ensure that a source page
635 	 * is not copied to its destination page before the data on
636 	 * the destination page is no longer useful.
637 	 *
638 	 * To do this we maintain the invariant that a source page is
639 	 * either its own destination page, or it is not a
640 	 * destination page at all.
641 	 *
642 	 * That is slightly stronger than required, but the proof
643 	 * that no problems will occur is trivial, and the
644 	 * implementation is simple to verify.
645 	 *
646 	 * When allocating all pages normally this algorithm will run
647 	 * in O(N) time, but in the worst case it will run in O(N^2)
648 	 * time.   If the runtime is a problem the data structures can
649 	 * be fixed.
650 	 */
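	/*
	 * Illustrative summary of the swap performed below: if the freshly
	 * allocated page turns out to be the destination of some existing
	 * source page, that source's contents are copied into the new page
	 * (which thereby becomes its own destination), the old IND_SOURCE
	 * entry is repointed at it, and the displaced source page is handed
	 * back to the caller instead.
	 */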
651 	struct page *page;
652 	unsigned long addr;
653 
654 	/*
655 	 * Walk through the list of destination pages, and see if I
656 	 * have a match.
657 	 */
658 	list_for_each_entry(page, &image->dest_pages, lru) {
659 		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
660 		if (addr == destination) {
661 			list_del(&page->lru);
662 			return page;
663 		}
664 	}
665 	page = NULL;
666 	while (1) {
667 		kimage_entry_t *old;
668 
669 		/* Allocate a page, if we run out of memory give up */
670 		page = kimage_alloc_pages(gfp_mask, 0);
671 		if (!page)
672 			return NULL;
673 		/* If the page cannot be used, file it away */
674 		if (page_to_boot_pfn(page) >
675 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
676 			list_add(&page->lru, &image->unusable_pages);
677 			continue;
678 		}
679 		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
680 
681 		/* If it is the destination page we want, use it */
682 		if (addr == destination)
683 			break;
684 
685 		/* If the page is not a destination page use it */
686 		if (!kimage_is_destination_range(image, addr,
687 						  addr + PAGE_SIZE))
688 			break;
689 
690 		/*
691 		 * I know that the page is someone's destination page.
692 		 * See if there is already a source page for this
693 		 * destination page.  And if so swap the source pages.
694 		 */
695 		old = kimage_dst_used(image, addr);
696 		if (old) {
697 			/* If so move it */
698 			unsigned long old_addr;
699 			struct page *old_page;
700 
701 			old_addr = *old & PAGE_MASK;
702 			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
703 			copy_highpage(page, old_page);
704 			*old = addr | (*old & ~PAGE_MASK);
705 
706 			/* The old page I have found cannot be a
707 			 * destination page, so return it if its
708 			 * gfp_flags honor the ones passed in.
709 			 */
710 			if (!(gfp_mask & __GFP_HIGHMEM) &&
711 			    PageHighMem(old_page)) {
712 				kimage_free_pages(old_page);
713 				continue;
714 			}
715 			addr = old_addr;
716 			page = old_page;
717 			break;
718 		}
719 		/* Place the page on the destination list, to be used later */
720 		list_add(&page->lru, &image->dest_pages);
721 	}
722 
723 	return page;
724 }
725 
726 static int kimage_load_normal_segment(struct kimage *image,
727 					 struct kexec_segment *segment)
728 {
729 	unsigned long maddr;
730 	size_t ubytes, mbytes;
731 	int result;
732 	unsigned char __user *buf = NULL;
733 	unsigned char *kbuf = NULL;
734 
735 	result = 0;
736 	if (image->file_mode)
737 		kbuf = segment->kbuf;
738 	else
739 		buf = segment->buf;
740 	ubytes = segment->bufsz;
741 	mbytes = segment->memsz;
742 	maddr = segment->mem;
743 
744 	result = kimage_set_destination(image, maddr);
745 	if (result < 0)
746 		goto out;
747 
748 	while (mbytes) {
749 		struct page *page;
750 		char *ptr;
751 		size_t uchunk, mchunk;
752 
753 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
754 		if (!page) {
755 			result  = -ENOMEM;
756 			goto out;
757 		}
758 		result = kimage_add_page(image, page_to_boot_pfn(page)
759 								<< PAGE_SHIFT);
760 		if (result < 0)
761 			goto out;
762 
763 		ptr = kmap(page);
764 		/* Start with a clear page */
765 		clear_page(ptr);
766 		ptr += maddr & ~PAGE_MASK;
767 		mchunk = min_t(size_t, mbytes,
768 				PAGE_SIZE - (maddr & ~PAGE_MASK));
769 		uchunk = min(ubytes, mchunk);
770 
771 		/* For file based kexec, source pages are in kernel memory */
772 		if (image->file_mode)
773 			memcpy(ptr, kbuf, uchunk);
774 		else
775 			result = copy_from_user(ptr, buf, uchunk);
776 		kunmap(page);
777 		if (result) {
778 			result = -EFAULT;
779 			goto out;
780 		}
781 		ubytes -= uchunk;
782 		maddr  += mchunk;
783 		if (image->file_mode)
784 			kbuf += mchunk;
785 		else
786 			buf += mchunk;
787 		mbytes -= mchunk;
788 	}
789 out:
790 	return result;
791 }
792 
793 static int kimage_load_crash_segment(struct kimage *image,
794 					struct kexec_segment *segment)
795 {
796 	/* For crash dump kernels we simply copy the data from
797 	 * user space to its destination.
798 	 * We do things a page at a time for the sake of kmap.
799 	 */
800 	unsigned long maddr;
801 	size_t ubytes, mbytes;
802 	int result;
803 	unsigned char __user *buf = NULL;
804 	unsigned char *kbuf = NULL;
805 
806 	result = 0;
807 	if (image->file_mode)
808 		kbuf = segment->kbuf;
809 	else
810 		buf = segment->buf;
811 	ubytes = segment->bufsz;
812 	mbytes = segment->memsz;
813 	maddr = segment->mem;
814 	while (mbytes) {
815 		struct page *page;
816 		char *ptr;
817 		size_t uchunk, mchunk;
818 
819 		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
820 		if (!page) {
821 			result  = -ENOMEM;
822 			goto out;
823 		}
824 		ptr = kmap(page);
825 		ptr += maddr & ~PAGE_MASK;
826 		mchunk = min_t(size_t, mbytes,
827 				PAGE_SIZE - (maddr & ~PAGE_MASK));
828 		uchunk = min(ubytes, mchunk);
829 		if (mchunk > uchunk) {
830 			/* Zero the trailing part of the page */
831 			memset(ptr + uchunk, 0, mchunk - uchunk);
832 		}
833 
834 		/* For file based kexec, source pages are in kernel memory */
835 		if (image->file_mode)
836 			memcpy(ptr, kbuf, uchunk);
837 		else
838 			result = copy_from_user(ptr, buf, uchunk);
839 		kexec_flush_icache_page(page);
840 		kunmap(page);
841 		if (result) {
842 			result = -EFAULT;
843 			goto out;
844 		}
845 		ubytes -= uchunk;
846 		maddr  += mchunk;
847 		if (image->file_mode)
848 			kbuf += mchunk;
849 		else
850 			buf += mchunk;
851 		mbytes -= mchunk;
852 	}
853 out:
854 	return result;
855 }
856 
857 int kimage_load_segment(struct kimage *image,
858 				struct kexec_segment *segment)
859 {
860 	int result = -ENOMEM;
861 
862 	switch (image->type) {
863 	case KEXEC_TYPE_DEFAULT:
864 		result = kimage_load_normal_segment(image, segment);
865 		break;
866 	case KEXEC_TYPE_CRASH:
867 		result = kimage_load_crash_segment(image, segment);
868 		break;
869 	}
870 
871 	return result;
872 }
873 
874 struct kimage *kexec_image;
875 struct kimage *kexec_crash_image;
876 int kexec_load_disabled;
877 
878 /*
879  * No panic_cpu check version of crash_kexec().  This function is called
880  * only when panic_cpu holds the current CPU number; this is the only CPU
881  * which processes crash_kexec routines.
882  */
883 void __crash_kexec(struct pt_regs *regs)
884 {
885 	/* Take the kexec_mutex here to prevent sys_kexec_load
886 	 * running on one cpu from replacing the crash kernel
887 	 * we are using after a panic on a different cpu.
888 	 *
889 	 * If the crash kernel was not located in a fixed area
890 	 * of memory the xchg(&kexec_crash_image) would be
891 	 * sufficient.  But since I reuse the memory...
892 	 */
893 	if (mutex_trylock(&kexec_mutex)) {
894 		if (kexec_crash_image) {
895 			struct pt_regs fixed_regs;
896 
897 			crash_setup_regs(&fixed_regs, regs);
898 			crash_save_vmcoreinfo();
899 			machine_crash_shutdown(&fixed_regs);
900 			machine_kexec(kexec_crash_image);
901 		}
902 		mutex_unlock(&kexec_mutex);
903 	}
904 }
905 
906 void crash_kexec(struct pt_regs *regs)
907 {
908 	int old_cpu, this_cpu;
909 
910 	/*
911 	 * Only one CPU is allowed to execute the crash_kexec() code as with
912 	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
913 	 * may stop each other.  To exclude them, we use panic_cpu here too.
914 	 */
915 	this_cpu = raw_smp_processor_id();
916 	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
917 	if (old_cpu == PANIC_CPU_INVALID) {
918 		/* This is the 1st CPU which comes here, so go ahead. */
919 		printk_nmi_flush_on_panic();
920 		__crash_kexec(regs);
921 
922 		/*
923 		 * Reset panic_cpu to allow another panic()/crash_kexec()
924 		 * call.
925 		 */
926 		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
927 	}
928 }
929 
930 size_t crash_get_memory_size(void)
931 {
932 	size_t size = 0;
933 
934 	mutex_lock(&kexec_mutex);
935 	if (crashk_res.end != crashk_res.start)
936 		size = resource_size(&crashk_res);
937 	mutex_unlock(&kexec_mutex);
938 	return size;
939 }
940 
941 void __weak crash_free_reserved_phys_range(unsigned long begin,
942 					   unsigned long end)
943 {
944 	unsigned long addr;
945 
946 	for (addr = begin; addr < end; addr += PAGE_SIZE)
947 		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
948 }
949 
950 int crash_shrink_memory(unsigned long new_size)
951 {
952 	int ret = 0;
953 	unsigned long start, end;
954 	unsigned long old_size;
955 	struct resource *ram_res;
956 
957 	mutex_lock(&kexec_mutex);
958 
959 	if (kexec_crash_image) {
960 		ret = -ENOENT;
961 		goto unlock;
962 	}
963 	start = crashk_res.start;
964 	end = crashk_res.end;
965 	old_size = (end == 0) ? 0 : end - start + 1;
966 	if (new_size >= old_size) {
967 		ret = (new_size == old_size) ? 0 : -EINVAL;
968 		goto unlock;
969 	}
970 
971 	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
972 	if (!ram_res) {
973 		ret = -ENOMEM;
974 		goto unlock;
975 	}
976 
977 	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
978 	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
979 
980 	crash_free_reserved_phys_range(end, crashk_res.end);
981 
982 	if ((start == end) && (crashk_res.parent != NULL))
983 		release_resource(&crashk_res);
984 
985 	ram_res->start = end;
986 	ram_res->end = crashk_res.end;
987 	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
988 	ram_res->name = "System RAM";
989 
990 	crashk_res.end = end - 1;
991 
992 	insert_resource(&iomem_resource, ram_res);
993 
994 unlock:
995 	mutex_unlock(&kexec_mutex);
996 	return ret;
997 }
998 
999 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1000 			    size_t data_len)
1001 {
1002 	struct elf_note note;
1003 
1004 	note.n_namesz = strlen(name) + 1;
1005 	note.n_descsz = data_len;
1006 	note.n_type   = type;
1007 	memcpy(buf, &note, sizeof(note));
1008 	buf += (sizeof(note) + 3)/4;
1009 	memcpy(buf, name, note.n_namesz);
1010 	buf += (note.n_namesz + 3)/4;
1011 	memcpy(buf, data, note.n_descsz);
1012 	buf += (note.n_descsz + 3)/4;
1013 
1014 	return buf;
1015 }
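/*
 * Sketch of the layout written above: the struct elf_note header, then the
 * NUL-terminated name, then the descriptor data, with the name and the
 * descriptor each padded to a 4-byte boundary -- hence the "(len + 3)/4"
 * advances of the u32 cursor.  For example, a name of "CORE" (4 characters
 * plus the terminating NUL, so n_namesz == 5) consumes 2 u32 words.
 */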
1016 
1017 static void final_note(u32 *buf)
1018 {
1019 	struct elf_note note;
1020 
1021 	note.n_namesz = 0;
1022 	note.n_descsz = 0;
1023 	note.n_type   = 0;
1024 	memcpy(buf, &note, sizeof(note));
1025 }
1026 
1027 void crash_save_cpu(struct pt_regs *regs, int cpu)
1028 {
1029 	struct elf_prstatus prstatus;
1030 	u32 *buf;
1031 
1032 	if ((cpu < 0) || (cpu >= nr_cpu_ids))
1033 		return;
1034 
1035 	/* Using ELF notes here is opportunistic.
1036 	 * I need a well-defined structure format
1037 	 * for the data I pass, and I need tags
1038 	 * on the data to indicate what information I have
1039 	 * squirrelled away.  ELF notes happen to provide
1040 	 * all of that, so there is no need to invent something new.
1041 	 */
1042 	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1043 	if (!buf)
1044 		return;
1045 	memset(&prstatus, 0, sizeof(prstatus));
1046 	prstatus.pr_pid = current->pid;
1047 	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1048 	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1049 			      &prstatus, sizeof(prstatus));
1050 	final_note(buf);
1051 }
1052 
1053 static int __init crash_notes_memory_init(void)
1054 {
1055 	/* Allocate memory for saving cpu registers. */
1056 	size_t size, align;
1057 
1058 	/*
1059 	 * crash_notes could be allocated across 2 vmalloc pages when percpu
1060 	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous vmalloc
1061 	 * pages are also on 2 contiguous physical pages. In this case the
1062 	 * 2nd part of crash_notes in the 2nd page could be lost since only the
1063 	 * starting address and size of crash_notes are exported through sysfs.
1064 	 * Here we round up the size of crash_notes to the nearest power of two
1065 	 * and pass it to __alloc_percpu as the align value. This makes sure
1066 	 * crash_notes is allocated inside one physical page.
1067 	 */
1068 	size = sizeof(note_buf_t);
1069 	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1070 
1071 	/*
1072 	 * Break the build if size is bigger than PAGE_SIZE, since crash_notes
1073 	 * would then definitely span 2 pages.
1074 	 */
1075 	BUILD_BUG_ON(size > PAGE_SIZE);
1076 
1077 	crash_notes = __alloc_percpu(size, align);
1078 	if (!crash_notes) {
1079 		pr_warn("Memory allocation for saving cpu register states failed\n");
1080 		return -ENOMEM;
1081 	}
1082 	return 0;
1083 }
1084 subsys_initcall(crash_notes_memory_init);
1085 
1086 
1087 /*
1088  * parsing the "crashkernel" commandline
1089  *
1090  * this code is intended to be called from architecture specific code
1091  */
1092 
1093 
1094 /*
1095  * This function parses command lines in the format
1096  *
1097  *   crashkernel=ramsize-range:size[,...][@offset]
1098  *
1099  * The function returns 0 on success and -EINVAL on failure.
1100  */
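/*
 * For example (illustrative values only):
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when the system has at least 512M but less than 2G of RAM,
 * 128M when it has 2G or more, and requests that the reservation be placed
 * at offset 16M.
 */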
1101 static int __init parse_crashkernel_mem(char *cmdline,
1102 					unsigned long long system_ram,
1103 					unsigned long long *crash_size,
1104 					unsigned long long *crash_base)
1105 {
1106 	char *cur = cmdline, *tmp;
1107 
1108 	/* for each entry of the comma-separated list */
1109 	do {
1110 		unsigned long long start, end = ULLONG_MAX, size;
1111 
1112 		/* get the start of the range */
1113 		start = memparse(cur, &tmp);
1114 		if (cur == tmp) {
1115 			pr_warn("crashkernel: Memory value expected\n");
1116 			return -EINVAL;
1117 		}
1118 		cur = tmp;
1119 		if (*cur != '-') {
1120 			pr_warn("crashkernel: '-' expected\n");
1121 			return -EINVAL;
1122 		}
1123 		cur++;
1124 
1125 		/* if no ':' is here, then we read the end */
1126 		if (*cur != ':') {
1127 			end = memparse(cur, &tmp);
1128 			if (cur == tmp) {
1129 				pr_warn("crashkernel: Memory value expected\n");
1130 				return -EINVAL;
1131 			}
1132 			cur = tmp;
1133 			if (end <= start) {
1134 				pr_warn("crashkernel: end <= start\n");
1135 				return -EINVAL;
1136 			}
1137 		}
1138 
1139 		if (*cur != ':') {
1140 			pr_warn("crashkernel: ':' expected\n");
1141 			return -EINVAL;
1142 		}
1143 		cur++;
1144 
1145 		size = memparse(cur, &tmp);
1146 		if (cur == tmp) {
1147 			pr_warn("Memory value expected\n");
1148 			return -EINVAL;
1149 		}
1150 		cur = tmp;
1151 		if (size >= system_ram) {
1152 			pr_warn("crashkernel: invalid size\n");
1153 			return -EINVAL;
1154 		}
1155 
1156 		/* match ? */
1157 		if (system_ram >= start && system_ram < end) {
1158 			*crash_size = size;
1159 			break;
1160 		}
1161 	} while (*cur++ == ',');
1162 
1163 	if (*crash_size > 0) {
1164 		while (*cur && *cur != ' ' && *cur != '@')
1165 			cur++;
1166 		if (*cur == '@') {
1167 			cur++;
1168 			*crash_base = memparse(cur, &tmp);
1169 			if (cur == tmp) {
1170 				pr_warn("Memory value expected after '@'\n");
1171 				return -EINVAL;
1172 			}
1173 		}
1174 	}
1175 
1176 	return 0;
1177 }
1178 
1179 /*
1180  * This function parses "simple" (old) crashkernel command lines like
1181  *
1182  *	crashkernel=size[@offset]
1183  *
1184  * It returns 0 on success and -EINVAL on failure.
1185  */
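/* For example (illustrative values): crashkernel=128M or crashkernel=128M@16M */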
1186 static int __init parse_crashkernel_simple(char *cmdline,
1187 					   unsigned long long *crash_size,
1188 					   unsigned long long *crash_base)
1189 {
1190 	char *cur = cmdline;
1191 
1192 	*crash_size = memparse(cmdline, &cur);
1193 	if (cmdline == cur) {
1194 		pr_warn("crashkernel: memory value expected\n");
1195 		return -EINVAL;
1196 	}
1197 
1198 	if (*cur == '@')
1199 		*crash_base = memparse(cur+1, &cur);
1200 	else if (*cur != ' ' && *cur != '\0') {
1201 		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1202 		return -EINVAL;
1203 	}
1204 
1205 	return 0;
1206 }
1207 
1208 #define SUFFIX_HIGH 0
1209 #define SUFFIX_LOW  1
1210 #define SUFFIX_NULL 2
1211 static __initdata char *suffix_tbl[] = {
1212 	[SUFFIX_HIGH] = ",high",
1213 	[SUFFIX_LOW]  = ",low",
1214 	[SUFFIX_NULL] = NULL,
1215 };
1216 
1217 /*
1218  * This function parses "suffix" crashkernel command lines like
1219  *
1220  *	crashkernel=size,[high|low]
1221  *
1222  * It returns 0 on success and -EINVAL on failure.
1223  */
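/* For example (illustrative values): crashkernel=256M,high or crashkernel=64M,low */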
1224 static int __init parse_crashkernel_suffix(char *cmdline,
1225 					   unsigned long long	*crash_size,
1226 					   const char *suffix)
1227 {
1228 	char *cur = cmdline;
1229 
1230 	*crash_size = memparse(cmdline, &cur);
1231 	if (cmdline == cur) {
1232 		pr_warn("crashkernel: memory value expected\n");
1233 		return -EINVAL;
1234 	}
1235 
1236 	/* check with suffix */
1237 	if (strncmp(cur, suffix, strlen(suffix))) {
1238 		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1239 		return -EINVAL;
1240 	}
1241 	cur += strlen(suffix);
1242 	if (*cur != ' ' && *cur != '\0') {
1243 		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
1244 		return -EINVAL;
1245 	}
1246 
1247 	return 0;
1248 }
1249 
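/*
 * For example (illustrative): with "crashkernel=64M crashkernel=128M" on the
 * command line, the later entry wins.  When no suffix is requested, entries
 * carrying a known ",high"/",low" suffix are skipped; when a suffix is
 * requested, only entries ending in that suffix are considered.
 */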
1250 static __init char *get_last_crashkernel(char *cmdline,
1251 			     const char *name,
1252 			     const char *suffix)
1253 {
1254 	char *p = cmdline, *ck_cmdline = NULL;
1255 
1256 	/* find crashkernel and use the last one if there are more */
1257 	p = strstr(p, name);
1258 	while (p) {
1259 		char *end_p = strchr(p, ' ');
1260 		char *q;
1261 
1262 		if (!end_p)
1263 			end_p = p + strlen(p);
1264 
1265 		if (!suffix) {
1266 			int i;
1267 
1268 			/* skip the one with any known suffix */
1269 			for (i = 0; suffix_tbl[i]; i++) {
1270 				q = end_p - strlen(suffix_tbl[i]);
1271 				if (!strncmp(q, suffix_tbl[i],
1272 					     strlen(suffix_tbl[i])))
1273 					goto next;
1274 			}
1275 			ck_cmdline = p;
1276 		} else {
1277 			q = end_p - strlen(suffix);
1278 			if (!strncmp(q, suffix, strlen(suffix)))
1279 				ck_cmdline = p;
1280 		}
1281 next:
1282 		p = strstr(p+1, name);
1283 	}
1284 
1285 	if (!ck_cmdline)
1286 		return NULL;
1287 
1288 	return ck_cmdline;
1289 }
1290 
1291 static int __init __parse_crashkernel(char *cmdline,
1292 			     unsigned long long system_ram,
1293 			     unsigned long long *crash_size,
1294 			     unsigned long long *crash_base,
1295 			     const char *name,
1296 			     const char *suffix)
1297 {
1298 	char	*first_colon, *first_space;
1299 	char	*ck_cmdline;
1300 
1301 	BUG_ON(!crash_size || !crash_base);
1302 	*crash_size = 0;
1303 	*crash_base = 0;
1304 
1305 	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1306 
1307 	if (!ck_cmdline)
1308 		return -EINVAL;
1309 
1310 	ck_cmdline += strlen(name);
1311 
1312 	if (suffix)
1313 		return parse_crashkernel_suffix(ck_cmdline, crash_size,
1314 				suffix);
1315 	/*
1316 	 * if the commandline contains a ':', then that's the extended
1317 	 * syntax -- if not, it must be the classic syntax
1318 	 */
1319 	first_colon = strchr(ck_cmdline, ':');
1320 	first_space = strchr(ck_cmdline, ' ');
1321 	if (first_colon && (!first_space || first_colon < first_space))
1322 		return parse_crashkernel_mem(ck_cmdline, system_ram,
1323 				crash_size, crash_base);
1324 
1325 	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
1326 }
1327 
1328 /*
1329  * This function is the entry point for command line parsing and should be
1330  * called from the arch-specific code.
1331  */
1332 int __init parse_crashkernel(char *cmdline,
1333 			     unsigned long long system_ram,
1334 			     unsigned long long *crash_size,
1335 			     unsigned long long *crash_base)
1336 {
1337 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1338 					"crashkernel=", NULL);
1339 }
1340 
1341 int __init parse_crashkernel_high(char *cmdline,
1342 			     unsigned long long system_ram,
1343 			     unsigned long long *crash_size,
1344 			     unsigned long long *crash_base)
1345 {
1346 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1347 				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1348 }
1349 
1350 int __init parse_crashkernel_low(char *cmdline,
1351 			     unsigned long long system_ram,
1352 			     unsigned long long *crash_size,
1353 			     unsigned long long *crash_base)
1354 {
1355 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1356 				"crashkernel=", suffix_tbl[SUFFIX_LOW]);
1357 }
1358 
1359 static void update_vmcoreinfo_note(void)
1360 {
1361 	u32 *buf = vmcoreinfo_note;
1362 
1363 	if (!vmcoreinfo_size)
1364 		return;
1365 	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1366 			      vmcoreinfo_size);
1367 	final_note(buf);
1368 }
1369 
1370 void crash_save_vmcoreinfo(void)
1371 {
1372 	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1373 	update_vmcoreinfo_note();
1374 }
1375 
1376 void vmcoreinfo_append_str(const char *fmt, ...)
1377 {
1378 	va_list args;
1379 	char buf[0x50];
1380 	size_t r;
1381 
1382 	va_start(args, fmt);
1383 	r = vscnprintf(buf, sizeof(buf), fmt, args);
1384 	va_end(args);
1385 
1386 	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1387 
1388 	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1389 
1390 	vmcoreinfo_size += r;
1391 }
1392 
1393 /*
1394  * provide an empty default implementation here -- architecture
1395  * code may override this
1396  */
1397 void __weak arch_crash_save_vmcoreinfo(void)
1398 {}
1399 
1400 phys_addr_t __weak paddr_vmcoreinfo_note(void)
1401 {
1402 	return __pa((unsigned long)(char *)&vmcoreinfo_note);
1403 }
1404 
1405 static int __init crash_save_vmcoreinfo_init(void)
1406 {
1407 	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1408 	VMCOREINFO_PAGESIZE(PAGE_SIZE);
1409 
1410 	VMCOREINFO_SYMBOL(init_uts_ns);
1411 	VMCOREINFO_SYMBOL(node_online_map);
1412 #ifdef CONFIG_MMU
1413 	VMCOREINFO_SYMBOL(swapper_pg_dir);
1414 #endif
1415 	VMCOREINFO_SYMBOL(_stext);
1416 	VMCOREINFO_SYMBOL(vmap_area_list);
1417 
1418 #ifndef CONFIG_NEED_MULTIPLE_NODES
1419 	VMCOREINFO_SYMBOL(mem_map);
1420 	VMCOREINFO_SYMBOL(contig_page_data);
1421 #endif
1422 #ifdef CONFIG_SPARSEMEM
1423 	VMCOREINFO_SYMBOL(mem_section);
1424 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1425 	VMCOREINFO_STRUCT_SIZE(mem_section);
1426 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
1427 #endif
1428 	VMCOREINFO_STRUCT_SIZE(page);
1429 	VMCOREINFO_STRUCT_SIZE(pglist_data);
1430 	VMCOREINFO_STRUCT_SIZE(zone);
1431 	VMCOREINFO_STRUCT_SIZE(free_area);
1432 	VMCOREINFO_STRUCT_SIZE(list_head);
1433 	VMCOREINFO_SIZE(nodemask_t);
1434 	VMCOREINFO_OFFSET(page, flags);
1435 	VMCOREINFO_OFFSET(page, _refcount);
1436 	VMCOREINFO_OFFSET(page, mapping);
1437 	VMCOREINFO_OFFSET(page, lru);
1438 	VMCOREINFO_OFFSET(page, _mapcount);
1439 	VMCOREINFO_OFFSET(page, private);
1440 	VMCOREINFO_OFFSET(page, compound_dtor);
1441 	VMCOREINFO_OFFSET(page, compound_order);
1442 	VMCOREINFO_OFFSET(page, compound_head);
1443 	VMCOREINFO_OFFSET(pglist_data, node_zones);
1444 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
1445 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1446 	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1447 #endif
1448 	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1449 	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1450 	VMCOREINFO_OFFSET(pglist_data, node_id);
1451 	VMCOREINFO_OFFSET(zone, free_area);
1452 	VMCOREINFO_OFFSET(zone, vm_stat);
1453 	VMCOREINFO_OFFSET(zone, spanned_pages);
1454 	VMCOREINFO_OFFSET(free_area, free_list);
1455 	VMCOREINFO_OFFSET(list_head, next);
1456 	VMCOREINFO_OFFSET(list_head, prev);
1457 	VMCOREINFO_OFFSET(vmap_area, va_start);
1458 	VMCOREINFO_OFFSET(vmap_area, list);
1459 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1460 	log_buf_kexec_setup();
1461 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1462 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
1463 	VMCOREINFO_NUMBER(PG_lru);
1464 	VMCOREINFO_NUMBER(PG_private);
1465 	VMCOREINFO_NUMBER(PG_swapcache);
1466 	VMCOREINFO_NUMBER(PG_slab);
1467 #ifdef CONFIG_MEMORY_FAILURE
1468 	VMCOREINFO_NUMBER(PG_hwpoison);
1469 #endif
1470 	VMCOREINFO_NUMBER(PG_head_mask);
1471 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1472 #ifdef CONFIG_HUGETLB_PAGE
1473 	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
1474 #endif
1475 
1476 	arch_crash_save_vmcoreinfo();
1477 	update_vmcoreinfo_note();
1478 
1479 	return 0;
1480 }
1481 
1482 subsys_initcall(crash_save_vmcoreinfo_init);
1483 
1484 /*
1485  * Move into place and start executing a preloaded standalone
1486  * executable.  If nothing was preloaded return an error.
1487  */
1488 int kernel_kexec(void)
1489 {
1490 	int error = 0;
1491 
1492 	if (!mutex_trylock(&kexec_mutex))
1493 		return -EBUSY;
1494 	if (!kexec_image) {
1495 		error = -EINVAL;
1496 		goto Unlock;
1497 	}
1498 
1499 #ifdef CONFIG_KEXEC_JUMP
1500 	if (kexec_image->preserve_context) {
1501 		lock_system_sleep();
1502 		pm_prepare_console();
1503 		error = freeze_processes();
1504 		if (error) {
1505 			error = -EBUSY;
1506 			goto Restore_console;
1507 		}
1508 		suspend_console();
1509 		error = dpm_suspend_start(PMSG_FREEZE);
1510 		if (error)
1511 			goto Resume_console;
1512 		/* At this point, dpm_suspend_start() has been called,
1513 		 * but *not* dpm_suspend_end(). We *must* call
1514 		 * dpm_suspend_end() now.  Otherwise, drivers for
1515 		 * some devices (e.g. interrupt controllers) become
1516 		 * desynchronized with the actual state of the
1517 		 * hardware at resume time, and evil weirdness ensues.
1518 		 */
1519 		error = dpm_suspend_end(PMSG_FREEZE);
1520 		if (error)
1521 			goto Resume_devices;
1522 		error = disable_nonboot_cpus();
1523 		if (error)
1524 			goto Enable_cpus;
1525 		local_irq_disable();
1526 		error = syscore_suspend();
1527 		if (error)
1528 			goto Enable_irqs;
1529 	} else
1530 #endif
1531 	{
1532 		kexec_in_progress = true;
1533 		kernel_restart_prepare(NULL);
1534 		migrate_to_reboot_cpu();
1535 
1536 		/*
1537 		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1538 		 * no further code needs to use CPU hotplug (which is true in
1539 		 * the reboot case). However, the kexec path depends on using
1540 		 * CPU hotplug again; so re-enable it here.
1541 		 */
1542 		cpu_hotplug_enable();
1543 		pr_emerg("Starting new kernel\n");
1544 		machine_shutdown();
1545 	}
1546 
1547 	machine_kexec(kexec_image);
1548 
1549 #ifdef CONFIG_KEXEC_JUMP
1550 	if (kexec_image->preserve_context) {
1551 		syscore_resume();
1552  Enable_irqs:
1553 		local_irq_enable();
1554  Enable_cpus:
1555 		enable_nonboot_cpus();
1556 		dpm_resume_start(PMSG_RESTORE);
1557  Resume_devices:
1558 		dpm_resume_end(PMSG_RESTORE);
1559  Resume_console:
1560 		resume_console();
1561 		thaw_processes();
1562  Restore_console:
1563 		pm_restore_console();
1564 		unlock_system_sleep();
1565 	}
1566 #endif
1567 
1568  Unlock:
1569 	mutex_unlock(&kexec_mutex);
1570 	return error;
1571 }
1572 
1573 /*
1574  * Protection mechanism for crashkernel reserved memory after
1575  * the kdump kernel is loaded.
1576  *
1577  * Provide an empty default implementation here -- architecture
1578  * code may override this
1579  */
1580 void __weak arch_kexec_protect_crashkres(void)
1581 {}
1582 
1583 void __weak arch_kexec_unprotect_crashkres(void)
1584 {}
1585