xref: /openbmc/linux/kernel/kexec.c (revision f77f13e2)
1 /*
2  * kexec.c - kexec system call
3  * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
4  *
5  * This source code is licensed under the GNU General Public License,
6  * Version 2.  See the file COPYING for more details.
7  */
8 
9 #include <linux/capability.h>
10 #include <linux/mm.h>
11 #include <linux/file.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14 #include <linux/kexec.h>
15 #include <linux/mutex.h>
16 #include <linux/list.h>
17 #include <linux/highmem.h>
18 #include <linux/syscalls.h>
19 #include <linux/reboot.h>
20 #include <linux/ioport.h>
21 #include <linux/hardirq.h>
22 #include <linux/elf.h>
23 #include <linux/elfcore.h>
24 #include <generated/utsrelease.h>
25 #include <linux/utsname.h>
26 #include <linux/numa.h>
27 #include <linux/suspend.h>
28 #include <linux/device.h>
29 #include <linux/freezer.h>
30 #include <linux/pm.h>
31 #include <linux/cpu.h>
32 #include <linux/console.h>
33 #include <linux/vmalloc.h>
34 #include <linux/swap.h>
35 #include <linux/kmsg_dump.h>
36 
37 #include <asm/page.h>
38 #include <asm/uaccess.h>
39 #include <asm/io.h>
40 #include <asm/system.h>
41 #include <asm/sections.h>
42 
43 /* Per cpu memory for storing cpu states in case of system crash. */
44 note_buf_t __percpu *crash_notes;
45 
46 /* vmcoreinfo stuff */
47 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
48 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
49 size_t vmcoreinfo_size;
50 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
51 
52 /* Location of the reserved area for the crash kernel */
53 struct resource crashk_res = {
54 	.name  = "Crash kernel",
55 	.start = 0,
56 	.end   = 0,
57 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
58 };
59 
60 int kexec_should_crash(struct task_struct *p)
61 {
62 	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
63 		return 1;
64 	return 0;
65 }
66 
67 /*
68  * When kexec transitions to the new kernel there is a one-to-one
69  * mapping between physical and virtual addresses.  On processors
70  * where you can disable the MMU this is trivial, and easy.  For
71  * others it is still a simple predictable page table to setup.
72  *
73  * In that environment kexec copies the new kernel to its final
74  * resting place.  This means I can only support memory whose
75  * physical address can fit in an unsigned long.  In particular
76  * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
77  * If the assembly stub has more restrictive requirements
78  * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
79  * defined more restrictively in <asm/kexec.h>.
80  *
81  * The code for the transition from the current kernel to the
82  * new kernel is placed in the control_code_buffer, whose size
83  * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
84  * page of memory is necessary, but some architectures require more.
85  * Because this memory must be identity mapped in the transition from
86  * virtual to physical addresses it must live in the range
87  * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
88  * modifiable.
89  *
90  * The assembly stub in the control code buffer is passed a linked list
91  * of descriptor pages detailing the source pages of the new kernel,
92  * and the destination addresses of those source pages.  As this data
93  * structure is not used in the context of the current OS, it must
94  * be self-contained.
95  *
96  * The code has been made to work with highmem pages and will use a
97  * destination page in its final resting place (if it happens
98  * to allocate it).  The end product of this is that most of the
99  * physical address space, and most of RAM can be used.
100  *
101  * Future directions include:
102  *  - allocating a page table with the control code buffer identity
103  *    mapped, to simplify machine_kexec and make kexec_on_panic more
104  *    reliable.
105  */
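/*
 * As a rough illustration of the descriptor list described above (the
 * physical addresses here are made up), each kimage_entry_t holds a page
 * aligned physical address with the low bits used as type flags:
 *
 *	0x01000000 | IND_DESTINATION    start copying at 0x01000000
 *	0x37a41000 | IND_SOURCE         copy this page to 0x01000000
 *	0x37a42000 | IND_SOURCE         copy this page to 0x01001000
 *	0x37b00000 | IND_INDIRECTION    continue reading entries at 0x37b00000
 *	...
 *	IND_DONE                        end of the list
 *
 * kimage_set_destination(), kimage_add_page() and kimage_add_entry()
 * below build this structure, and for_each_kimage_entry() walks it.
 */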
106 
107 /*
108  * KIMAGE_NO_DEST is an impossible destination address, used for
109  * allocating pages whose destination address we do not care about.
110  */
111 #define KIMAGE_NO_DEST (-1UL)
112 
113 static int kimage_is_destination_range(struct kimage *image,
114 				       unsigned long start, unsigned long end);
115 static struct page *kimage_alloc_page(struct kimage *image,
116 				       gfp_t gfp_mask,
117 				       unsigned long dest);
118 
119 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
120 	                    unsigned long nr_segments,
121                             struct kexec_segment __user *segments)
122 {
123 	size_t segment_bytes;
124 	struct kimage *image;
125 	unsigned long i;
126 	int result;
127 
128 	/* Allocate a controlling structure */
129 	result = -ENOMEM;
130 	image = kzalloc(sizeof(*image), GFP_KERNEL);
131 	if (!image)
132 		goto out;
133 
134 	image->head = 0;
135 	image->entry = &image->head;
136 	image->last_entry = &image->head;
137 	image->control_page = ~0; /* By default this does not apply */
138 	image->start = entry;
139 	image->type = KEXEC_TYPE_DEFAULT;
140 
141 	/* Initialize the list of control pages */
142 	INIT_LIST_HEAD(&image->control_pages);
143 
144 	/* Initialize the list of destination pages */
145 	INIT_LIST_HEAD(&image->dest_pages);
146 
147 	/* Initialize the list of unuseable pages */
148 	INIT_LIST_HEAD(&image->unuseable_pages);
149 
150 	/* Read in the segments */
151 	image->nr_segments = nr_segments;
152 	segment_bytes = nr_segments * sizeof(*segments);
153 	result = copy_from_user(image->segment, segments, segment_bytes);
154 	if (result)
155 		goto out;
156 
157 	/*
158 	 * Verify we have good destination addresses.  The caller is
159 	 * responsible for making certain we don't attempt to load
160 	 * the new image into invalid or reserved areas of RAM.  This
161 	 * just verifies it is an address we can use.
162 	 *
163 	 * Since the kernel does everything in page size chunks, ensure
164 	 * the destination addresses are page aligned.  Too many
165 	 * special cases crop up when we don't do this.  The most
166 	 * insidious is getting overlapping destination addresses
167 	 * simply because addresses are changed to page size
168 	 * granularity.
169 	 */
170 	result = -EADDRNOTAVAIL;
171 	for (i = 0; i < nr_segments; i++) {
172 		unsigned long mstart, mend;
173 
174 		mstart = image->segment[i].mem;
175 		mend   = mstart + image->segment[i].memsz;
176 		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
177 			goto out;
178 		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
179 			goto out;
180 	}
181 
182 	/* Verify our destination addresses do not overlap.
183 	 * If we allowed overlapping destination addresses
184 	 * through, very weird things can happen with no
185 	 * easy explanation as one segment stops on another.
186 	 */
187 	result = -EINVAL;
188 	for (i = 0; i < nr_segments; i++) {
189 		unsigned long mstart, mend;
190 		unsigned long j;
191 
192 		mstart = image->segment[i].mem;
193 		mend   = mstart + image->segment[i].memsz;
194 		for (j = 0; j < i; j++) {
195 			unsigned long pstart, pend;
196 			pstart = image->segment[j].mem;
197 			pend   = pstart + image->segment[j].memsz;
198 			/* Do the segments overlap ? */
199 			if ((mend > pstart) && (mstart < pend))
200 				goto out;
201 		}
202 	}
203 
204 	/* Ensure our buffer sizes are strictly less than
205 	 * our memory sizes.  This should always be the case,
206 	 * and it is easier to check up front than to be surprised
207 	 * later on.
208 	 */
209 	result = -EINVAL;
210 	for (i = 0; i < nr_segments; i++) {
211 		if (image->segment[i].bufsz > image->segment[i].memsz)
212 			goto out;
213 	}
214 
215 	result = 0;
216 out:
217 	if (result == 0)
218 		*rimage = image;
219 	else
220 		kfree(image);
221 
222 	return result;
223 
224 }
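/*
 * To make the checks in do_kimage_alloc() concrete, a hypothetical two
 * segment load such as
 *
 *	segment[0]: mem = 0x00100000, memsz = 0x500000, bufsz = 0x4f2a00
 *	segment[1]: mem = 0x00800000, memsz = 0x200000, bufsz = 0x1d3000
 *
 * is accepted (assuming it falls below KEXEC_DESTINATION_MEMORY_LIMIT):
 * each mem and mem + memsz is page aligned, the two ranges do not
 * overlap, and each bufsz fits within its memsz.  A segment at
 * mem = 0x00100800, or one whose range intersected
 * [0x00100000, 0x00600000), would be rejected.
 */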
225 
226 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
227 				unsigned long nr_segments,
228 				struct kexec_segment __user *segments)
229 {
230 	int result;
231 	struct kimage *image;
232 
233 	/* Allocate and initialize a controlling structure */
234 	image = NULL;
235 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
236 	if (result)
237 		goto out;
238 
239 	*rimage = image;
240 
241 	/*
242 	 * Find a location for the control code buffer, and add it to
243 	 * the vector of segments so that its pages will also be
244 	 * counted as destination pages.
245 	 */
246 	result = -ENOMEM;
247 	image->control_code_page = kimage_alloc_control_pages(image,
248 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
249 	if (!image->control_code_page) {
250 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
251 		goto out;
252 	}
253 
254 	image->swap_page = kimage_alloc_control_pages(image, 0);
255 	if (!image->swap_page) {
256 		printk(KERN_ERR "Could not allocate swap buffer\n");
257 		goto out;
258 	}
259 
260 	result = 0;
261  out:
262 	if (result == 0)
263 		*rimage = image;
264 	else
265 		kfree(image);
266 
267 	return result;
268 }
269 
270 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
271 				unsigned long nr_segments,
272 				struct kexec_segment __user *segments)
273 {
274 	int result;
275 	struct kimage *image;
276 	unsigned long i;
277 
278 	image = NULL;
279 	/* Verify we have a valid entry point */
280 	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
281 		result = -EADDRNOTAVAIL;
282 		goto out;
283 	}
284 
285 	/* Allocate and initialize a controlling structure */
286 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
287 	if (result)
288 		goto out;
289 
290 	/* Enable the special crash kernel control page
291 	 * allocation policy.
292 	 */
293 	image->control_page = crashk_res.start;
294 	image->type = KEXEC_TYPE_CRASH;
295 
296 	/*
297 	 * Verify we have good destination addresses.  Normally
298 	 * the caller is responsible for making certain we don't
299 	 * attempt to load the new image into invalid or reserved
300 	 * areas of RAM.  But crash kernels are preloaded into a
301 	 * reserved area of ram.  We must ensure the addresses
302 	 * are in the reserved area otherwise preloading the
303 	 * kernel could corrupt things.
304 	 */
305 	result = -EADDRNOTAVAIL;
306 	for (i = 0; i < nr_segments; i++) {
307 		unsigned long mstart, mend;
308 
309 		mstart = image->segment[i].mem;
310 		mend = mstart + image->segment[i].memsz - 1;
311 		/* Ensure we are within the crash kernel limits */
312 		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
313 			goto out;
314 	}
315 
316 	/*
317 	 * Find a location for the control code buffer, and add it to
318 	 * the vector of segments so that its pages will also be
319 	 * counted as destination pages.
320 	 */
321 	result = -ENOMEM;
322 	image->control_code_page = kimage_alloc_control_pages(image,
323 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
324 	if (!image->control_code_page) {
325 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
326 		goto out;
327 	}
328 
329 	result = 0;
330 out:
331 	if (result == 0)
332 		*rimage = image;
333 	else
334 		kfree(image);
335 
336 	return result;
337 }
338 
339 static int kimage_is_destination_range(struct kimage *image,
340 					unsigned long start,
341 					unsigned long end)
342 {
343 	unsigned long i;
344 
345 	for (i = 0; i < image->nr_segments; i++) {
346 		unsigned long mstart, mend;
347 
348 		mstart = image->segment[i].mem;
349 		mend = mstart + image->segment[i].memsz;
350 		if ((end > mstart) && (start < mend))
351 			return 1;
352 	}
353 
354 	return 0;
355 }
356 
357 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
358 {
359 	struct page *pages;
360 
361 	pages = alloc_pages(gfp_mask, order);
362 	if (pages) {
363 		unsigned int count, i;
364 		pages->mapping = NULL;
365 		set_page_private(pages, order);
366 		count = 1 << order;
367 		for (i = 0; i < count; i++)
368 			SetPageReserved(pages + i);
369 	}
370 
371 	return pages;
372 }
373 
374 static void kimage_free_pages(struct page *page)
375 {
376 	unsigned int order, count, i;
377 
378 	order = page_private(page);
379 	count = 1 << order;
380 	for (i = 0; i < count; i++)
381 		ClearPageReserved(page + i);
382 	__free_pages(page, order);
383 }
384 
385 static void kimage_free_page_list(struct list_head *list)
386 {
387 	struct list_head *pos, *next;
388 
389 	list_for_each_safe(pos, next, list) {
390 		struct page *page;
391 
392 		page = list_entry(pos, struct page, lru);
393 		list_del(&page->lru);
394 		kimage_free_pages(page);
395 	}
396 }
397 
398 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
399 							unsigned int order)
400 {
401 	/* Control pages are special: they are the intermediaries
402 	 * that are needed while we copy the rest of the pages
403 	 * to their final resting place.  As such they must
404 	 * not conflict with either the destination addresses
405 	 * or memory the kernel is already using.
406 	 *
407 	 * The only case where we really need more than one of
408 	 * these is for architectures where we cannot disable
409 	 * the MMU and must instead generate an identity mapped
410 	 * page table for all of the memory.
411 	 *
412 	 * At worst this runs in O(N) of the image size.
413 	 */
414 	struct list_head extra_pages;
415 	struct page *pages;
416 	unsigned int count;
417 
418 	count = 1 << order;
419 	INIT_LIST_HEAD(&extra_pages);
420 
421 	/* Loop while I can allocate a page and the page allocated
422 	 * is a destination page.
423 	 */
424 	do {
425 		unsigned long pfn, epfn, addr, eaddr;
426 
427 		pages = kimage_alloc_pages(GFP_KERNEL, order);
428 		if (!pages)
429 			break;
430 		pfn   = page_to_pfn(pages);
431 		epfn  = pfn + count;
432 		addr  = pfn << PAGE_SHIFT;
433 		eaddr = epfn << PAGE_SHIFT;
434 		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
435 			      kimage_is_destination_range(image, addr, eaddr)) {
436 			list_add(&pages->lru, &extra_pages);
437 			pages = NULL;
438 		}
439 	} while (!pages);
440 
441 	if (pages) {
442 		/* Remember the allocated page... */
443 		list_add(&pages->lru, &image->control_pages);
444 
445 		/* Because the page is already in its destination
446 		 * location we will never allocate another page at
447 		 * that address.  Therefore kimage_alloc_pages
448 		 * will not return it (again) and we don't need
449 		 * to give it an entry in image->segment[].
450 		 */
451 	}
452 	/* Deal with the destination pages I have inadvertently allocated.
453 	 *
454 	 * Ideally I would convert multi-page allocations into single
455 	 * page allocations, and add everything to image->dest_pages.
456 	 *
457 	 * For now it is simpler to just free the pages.
458 	 */
459 	kimage_free_page_list(&extra_pages);
460 
461 	return pages;
462 }
463 
464 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
465 						      unsigned int order)
466 {
467 	/* Control pages are special: they are the intermediaries
468 	 * that are needed while we copy the rest of the pages
469 	 * to their final resting place.  As such they must
470 	 * not conflict with either the destination addresses
471 	 * or memory the kernel is already using.
472 	 *
473 	 * Control pages are also the only pages we must allocate
474 	 * when loading a crash kernel.  All of the other pages
475 	 * are specified by the segments and we just memcpy
476 	 * into them directly.
477 	 *
478 	 * The only case where we really need more than one of
479 	 * these is for architectures where we cannot disable
480 	 * the MMU and must instead generate an identity mapped
481 	 * page table for all of the memory.
482 	 *
483 	 * Given the low demand this implements a very simple
484 	 * allocator that finds the first hole of the appropriate
485 	 * size in the reserved memory region, and allocates all
486 	 * of the memory up to and including the hole.
487 	 */
488 	unsigned long hole_start, hole_end, size;
489 	struct page *pages;
490 
491 	pages = NULL;
492 	size = (1 << order) << PAGE_SHIFT;
493 	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
494 	hole_end   = hole_start + size - 1;
495 	while (hole_end <= crashk_res.end) {
496 		unsigned long i;
497 
498 		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
499 			break;
500 		if (hole_end > crashk_res.end)
501 			break;
502 		/* See if I overlap any of the segments */
503 		for (i = 0; i < image->nr_segments; i++) {
504 			unsigned long mstart, mend;
505 
506 			mstart = image->segment[i].mem;
507 			mend   = mstart + image->segment[i].memsz - 1;
508 			if ((hole_end >= mstart) && (hole_start <= mend)) {
509 				/* Advance the hole to the end of the segment */
510 				hole_start = (mend + (size - 1)) & ~(size - 1);
511 				hole_end   = hole_start + size - 1;
512 				break;
513 			}
514 		}
515 		/* If I don't overlap any segments I have found my hole! */
516 		if (i == image->nr_segments) {
517 			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
518 			break;
519 		}
520 	}
521 	if (pages)
522 		image->control_page = hole_end;
523 
524 	return pages;
525 }
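/*
 * A small worked example of the hole search above, with hypothetical
 * numbers: for order 1 on 4 KiB pages the allocation size is 8 KiB.  If
 * crashk_res starts at 0x01000000 and segment[0] covers
 * 0x01000000-0x0123ffff, the first candidate hole 0x01000000-0x01001fff
 * overlaps that segment, so hole_start advances to the segment end
 * rounded up to the allocation size, 0x01240000, and the hole
 * 0x01240000-0x01241fff is returned provided it overlaps no other
 * segment and still lies within crashk_res.
 */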
526 
527 
528 struct page *kimage_alloc_control_pages(struct kimage *image,
529 					 unsigned int order)
530 {
531 	struct page *pages = NULL;
532 
533 	switch (image->type) {
534 	case KEXEC_TYPE_DEFAULT:
535 		pages = kimage_alloc_normal_control_pages(image, order);
536 		break;
537 	case KEXEC_TYPE_CRASH:
538 		pages = kimage_alloc_crash_control_pages(image, order);
539 		break;
540 	}
541 
542 	return pages;
543 }
544 
545 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
546 {
547 	if (*image->entry != 0)
548 		image->entry++;
549 
550 	if (image->entry == image->last_entry) {
551 		kimage_entry_t *ind_page;
552 		struct page *page;
553 
554 		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
555 		if (!page)
556 			return -ENOMEM;
557 
558 		ind_page = page_address(page);
559 		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
560 		image->entry = ind_page;
561 		image->last_entry = ind_page +
562 				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
563 	}
564 	*image->entry = entry;
565 	image->entry++;
566 	*image->entry = 0;
567 
568 	return 0;
569 }
570 
571 static int kimage_set_destination(struct kimage *image,
572 				   unsigned long destination)
573 {
574 	int result;
575 
576 	destination &= PAGE_MASK;
577 	result = kimage_add_entry(image, destination | IND_DESTINATION);
578 	if (result == 0)
579 		image->destination = destination;
580 
581 	return result;
582 }
583 
584 
585 static int kimage_add_page(struct kimage *image, unsigned long page)
586 {
587 	int result;
588 
589 	page &= PAGE_MASK;
590 	result = kimage_add_entry(image, page | IND_SOURCE);
591 	if (result == 0)
592 		image->destination += PAGE_SIZE;
593 
594 	return result;
595 }
596 
597 
598 static void kimage_free_extra_pages(struct kimage *image)
599 {
600 	/* Walk through and free any extra destination pages I may have */
601 	kimage_free_page_list(&image->dest_pages);
602 
603 	/* Walk through and free any unuseable pages I have cached */
604 	kimage_free_page_list(&image->unuseable_pages);
605 
606 }
607 static void kimage_terminate(struct kimage *image)
608 {
609 	if (*image->entry != 0)
610 		image->entry++;
611 
612 	*image->entry = IND_DONE;
613 }
614 
615 #define for_each_kimage_entry(image, ptr, entry) \
616 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
617 		ptr = (entry & IND_INDIRECTION)? \
618 			phys_to_virt((entry & PAGE_MASK)): ptr +1)
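/*
 * Informally, the iterator above starts ptr at &image->head and normally
 * advances it to the next kimage_entry_t; when the current entry has
 * IND_INDIRECTION set, the walk instead jumps to the start of the
 * indirection page named by that entry, and it stops at the first zero
 * entry or at an entry carrying IND_DONE.
 */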
619 
620 static void kimage_free_entry(kimage_entry_t entry)
621 {
622 	struct page *page;
623 
624 	page = pfn_to_page(entry >> PAGE_SHIFT);
625 	kimage_free_pages(page);
626 }
627 
628 static void kimage_free(struct kimage *image)
629 {
630 	kimage_entry_t *ptr, entry;
631 	kimage_entry_t ind = 0;
632 
633 	if (!image)
634 		return;
635 
636 	kimage_free_extra_pages(image);
637 	for_each_kimage_entry(image, ptr, entry) {
638 		if (entry & IND_INDIRECTION) {
639 			/* Free the previous indirection page */
640 			if (ind & IND_INDIRECTION)
641 				kimage_free_entry(ind);
642 			/* Save this indirection page until we are
643 			 * done with it.
644 			 */
645 			ind = entry;
646 		}
647 		else if (entry & IND_SOURCE)
648 			kimage_free_entry(entry);
649 	}
650 	/* Free the final indirection page */
651 	if (ind & IND_INDIRECTION)
652 		kimage_free_entry(ind);
653 
654 	/* Handle any machine specific cleanup */
655 	machine_kexec_cleanup(image);
656 
657 	/* Free the kexec control pages... */
658 	kimage_free_page_list(&image->control_pages);
659 	kfree(image);
660 }
661 
662 static kimage_entry_t *kimage_dst_used(struct kimage *image,
663 					unsigned long page)
664 {
665 	kimage_entry_t *ptr, entry;
666 	unsigned long destination = 0;
667 
668 	for_each_kimage_entry(image, ptr, entry) {
669 		if (entry & IND_DESTINATION)
670 			destination = entry & PAGE_MASK;
671 		else if (entry & IND_SOURCE) {
672 			if (page == destination)
673 				return ptr;
674 			destination += PAGE_SIZE;
675 		}
676 	}
677 
678 	return NULL;
679 }
680 
681 static struct page *kimage_alloc_page(struct kimage *image,
682 					gfp_t gfp_mask,
683 					unsigned long destination)
684 {
685 	/*
686 	 * Here we implement safeguards to ensure that a source page
687 	 * is not copied to its destination page before the data on
688 	 * the destination page is no longer useful.
689 	 *
690 	 * To do this we maintain the invariant that a source page is
691 	 * either its own destination page, or it is not a
692 	 * destination page at all.
693 	 *
694 	 * That is slightly stronger than required, but the proof
695 	 * that no problems will occur is trivial, and the
696 	 * implementation is simple to verify.
697 	 *
698 	 * When allocating all pages normally this algorithm will run
699 	 * in O(N) time, but in the worst case it will run in O(N^2)
700 	 * time.   If the runtime is a problem the data structures can
701 	 * be fixed.
702 	 */
703 	struct page *page;
704 	unsigned long addr;
705 
706 	/*
707 	 * Walk through the list of destination pages, and see if I
708 	 * have a match.
709 	 */
710 	list_for_each_entry(page, &image->dest_pages, lru) {
711 		addr = page_to_pfn(page) << PAGE_SHIFT;
712 		if (addr == destination) {
713 			list_del(&page->lru);
714 			return page;
715 		}
716 	}
717 	page = NULL;
718 	while (1) {
719 		kimage_entry_t *old;
720 
721 		/* Allocate a page, if we run out of memory give up */
722 		page = kimage_alloc_pages(gfp_mask, 0);
723 		if (!page)
724 			return NULL;
725 		/* If the page cannot be used, file it away */
726 		if (page_to_pfn(page) >
727 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
728 			list_add(&page->lru, &image->unuseable_pages);
729 			continue;
730 		}
731 		addr = page_to_pfn(page) << PAGE_SHIFT;
732 
733 		/* If it is the destination page we want, use it */
734 		if (addr == destination)
735 			break;
736 
737 		/* If the page is not a destination page use it */
738 		if (!kimage_is_destination_range(image, addr,
739 						  addr + PAGE_SIZE))
740 			break;
741 
742 		/*
743 		 * I know that the page is someone's destination page.
744 		 * See if there is already a source page for this
745 		 * destination page, and if so swap the source pages.
746 		 */
747 		old = kimage_dst_used(image, addr);
748 		if (old) {
749 			/* If so move it */
750 			unsigned long old_addr;
751 			struct page *old_page;
752 
753 			old_addr = *old & PAGE_MASK;
754 			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
755 			copy_highpage(page, old_page);
756 			*old = addr | (*old & ~PAGE_MASK);
757 
758 			/* The old page I have found cannot be a
759 			 * destination page, so return it if its
760 			 * gfp_flags honor the ones passed in.
761 			 */
762 			if (!(gfp_mask & __GFP_HIGHMEM) &&
763 			    PageHighMem(old_page)) {
764 				kimage_free_pages(old_page);
765 				continue;
766 			}
767 			addr = old_addr;
768 			page = old_page;
769 			break;
770 		}
771 		else {
772 			/* Place the page on the destination list; I
773 			 * will use it later.
774 			 */
775 			list_add(&page->lru, &image->dest_pages);
776 		}
777 	}
778 
779 	return page;
780 }
781 
782 static int kimage_load_normal_segment(struct kimage *image,
783 					 struct kexec_segment *segment)
784 {
785 	unsigned long maddr;
786 	unsigned long ubytes, mbytes;
787 	int result;
788 	unsigned char __user *buf;
789 
790 	result = 0;
791 	buf = segment->buf;
792 	ubytes = segment->bufsz;
793 	mbytes = segment->memsz;
794 	maddr = segment->mem;
795 
796 	result = kimage_set_destination(image, maddr);
797 	if (result < 0)
798 		goto out;
799 
800 	while (mbytes) {
801 		struct page *page;
802 		char *ptr;
803 		size_t uchunk, mchunk;
804 
805 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
806 		if (!page) {
807 			result  = -ENOMEM;
808 			goto out;
809 		}
810 		result = kimage_add_page(image, page_to_pfn(page)
811 								<< PAGE_SHIFT);
812 		if (result < 0)
813 			goto out;
814 
815 		ptr = kmap(page);
816 		/* Start with a clear page */
817 		memset(ptr, 0, PAGE_SIZE);
818 		ptr += maddr & ~PAGE_MASK;
819 		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
820 		if (mchunk > mbytes)
821 			mchunk = mbytes;
822 
823 		uchunk = mchunk;
824 		if (uchunk > ubytes)
825 			uchunk = ubytes;
826 
827 		result = copy_from_user(ptr, buf, uchunk);
828 		kunmap(page);
829 		if (result) {
830 			result = (result < 0) ? result : -EIO;
831 			goto out;
832 		}
833 		ubytes -= uchunk;
834 		maddr  += mchunk;
835 		buf    += mchunk;
836 		mbytes -= mchunk;
837 	}
838 out:
839 	return result;
840 }
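/*
 * A small numeric example of the loop above (sizes hypothetical): for a
 * segment with mem = 0x00100000, memsz = 0x3000 and bufsz = 0x1200, the
 * first page receives 0x1000 bytes of user data, the second receives the
 * remaining 0x200 user bytes with the rest of the page left as the
 * zeroes written by memset(), and the third page is left entirely
 * zeroed, padding the segment out to memsz.
 */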
841 
842 static int kimage_load_crash_segment(struct kimage *image,
843 					struct kexec_segment *segment)
844 {
845 	/* For crash dump kernels we simply copy the data from
846 	 * user space to its destination.
847 	 * We do things a page at a time for the sake of kmap.
848 	 */
849 	unsigned long maddr;
850 	unsigned long ubytes, mbytes;
851 	int result;
852 	unsigned char __user *buf;
853 
854 	result = 0;
855 	buf = segment->buf;
856 	ubytes = segment->bufsz;
857 	mbytes = segment->memsz;
858 	maddr = segment->mem;
859 	while (mbytes) {
860 		struct page *page;
861 		char *ptr;
862 		size_t uchunk, mchunk;
863 
864 		page = pfn_to_page(maddr >> PAGE_SHIFT);
865 		if (!page) {
866 			result  = -ENOMEM;
867 			goto out;
868 		}
869 		ptr = kmap(page);
870 		ptr += maddr & ~PAGE_MASK;
871 		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
872 		if (mchunk > mbytes)
873 			mchunk = mbytes;
874 
875 		uchunk = mchunk;
876 		if (uchunk > ubytes) {
877 			uchunk = ubytes;
878 			/* Zero the trailing part of the page */
879 			memset(ptr + uchunk, 0, mchunk - uchunk);
880 		}
881 		result = copy_from_user(ptr, buf, uchunk);
882 		kexec_flush_icache_page(page);
883 		kunmap(page);
884 		if (result) {
885 			result = (result < 0) ? result : -EIO;
886 			goto out;
887 		}
888 		ubytes -= uchunk;
889 		maddr  += mchunk;
890 		buf    += mchunk;
891 		mbytes -= mchunk;
892 	}
893 out:
894 	return result;
895 }
896 
897 static int kimage_load_segment(struct kimage *image,
898 				struct kexec_segment *segment)
899 {
900 	int result = -ENOMEM;
901 
902 	switch (image->type) {
903 	case KEXEC_TYPE_DEFAULT:
904 		result = kimage_load_normal_segment(image, segment);
905 		break;
906 	case KEXEC_TYPE_CRASH:
907 		result = kimage_load_crash_segment(image, segment);
908 		break;
909 	}
910 
911 	return result;
912 }
913 
914 /*
915  * Exec Kernel system call: for obvious reasons only root may call it.
916  *
917  * This call breaks up into three pieces.
918  * - A generic part which loads the new kernel from the current
919  *   address space, and very carefully places the data in the
920  *   allocated pages.
921  *
922  * - A generic part that interacts with the kernel and tells all of
923  *   the devices to shut down, preventing ongoing DMAs and placing
924  *   the devices in a consistent state so a later kernel can
925  *   reinitialize them.
926  *
927  * - A machine specific part that includes the syscall number
928  *   and then copies the image to its final destination, and
929  *   jumps into the image at entry.
930  *
931  * kexec does not sync or unmount filesystems, so if you need
932  * that to happen you must do it yourself.
933  */
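/*
 * For orientation, a minimal user space caller might look roughly like
 * the sketch below.  The names, addresses and lengths are purely
 * illustrative (in practice the segments are prepared by a loader such
 * as kexec-tools): buf/bufsz describe the user space copy of the image,
 * mem/memsz the page aligned physical destination, and entry is the
 * physical entry point of the new kernel.
 *
 *	#include <linux/kexec.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_buf,
 *		.bufsz = kernel_len,
 *		.mem   = (void *)0x100000,
 *		.memsz = (kernel_len + 4095) & ~4095UL,
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 *
 * followed later by reboot(LINUX_REBOOT_CMD_KEXEC) to jump into the
 * loaded image.
 */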
934 struct kimage *kexec_image;
935 struct kimage *kexec_crash_image;
936 
937 static DEFINE_MUTEX(kexec_mutex);
938 
939 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
940 		struct kexec_segment __user *, segments, unsigned long, flags)
941 {
942 	struct kimage **dest_image, *image;
943 	int result;
944 
945 	/* We only trust the superuser with rebooting the system. */
946 	if (!capable(CAP_SYS_BOOT))
947 		return -EPERM;
948 
949 	/*
950 	 * Verify we have a legal set of flags
951 	 * This leaves us room for future extensions.
952 	 */
953 	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
954 		return -EINVAL;
955 
956 	/* Verify we are on the appropriate architecture */
957 	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
958 		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
959 		return -EINVAL;
960 
961 	/* Put an artificial cap on the number
962 	 * of segments passed to kexec_load.
963 	 */
964 	if (nr_segments > KEXEC_SEGMENT_MAX)
965 		return -EINVAL;
966 
967 	image = NULL;
968 	result = 0;
969 
970 	/* Because we write directly to the reserved memory
971 	 * region when loading crash kernels we need a mutex here to
972 	 * prevent multiple crash kernels from attempting to load
973 	 * simultaneously, and to prevent a crash kernel from loading
974 	 * over the top of an in-use crash kernel.
975 	 *
976 	 * KISS: always take the mutex.
977 	 */
978 	if (!mutex_trylock(&kexec_mutex))
979 		return -EBUSY;
980 
981 	dest_image = &kexec_image;
982 	if (flags & KEXEC_ON_CRASH)
983 		dest_image = &kexec_crash_image;
984 	if (nr_segments > 0) {
985 		unsigned long i;
986 
987 		/* Loading another kernel to reboot into */
988 		if ((flags & KEXEC_ON_CRASH) == 0)
989 			result = kimage_normal_alloc(&image, entry,
990 							nr_segments, segments);
991 		/* Loading another kernel to switch to if this one crashes */
992 		else if (flags & KEXEC_ON_CRASH) {
993 			/* Free any current crash dump kernel before
994 			 * we corrupt it.
995 			 */
996 			kimage_free(xchg(&kexec_crash_image, NULL));
997 			result = kimage_crash_alloc(&image, entry,
998 						     nr_segments, segments);
999 		}
1000 		if (result)
1001 			goto out;
1002 
1003 		if (flags & KEXEC_PRESERVE_CONTEXT)
1004 			image->preserve_context = 1;
1005 		result = machine_kexec_prepare(image);
1006 		if (result)
1007 			goto out;
1008 
1009 		for (i = 0; i < nr_segments; i++) {
1010 			result = kimage_load_segment(image, &image->segment[i]);
1011 			if (result)
1012 				goto out;
1013 		}
1014 		kimage_terminate(image);
1015 	}
1016 	/* Install the new kernel and uninstall the old */
1017 	image = xchg(dest_image, image);
1018 
1019 out:
1020 	mutex_unlock(&kexec_mutex);
1021 	kimage_free(image);
1022 
1023 	return result;
1024 }
1025 
1026 #ifdef CONFIG_COMPAT
1027 asmlinkage long compat_sys_kexec_load(unsigned long entry,
1028 				unsigned long nr_segments,
1029 				struct compat_kexec_segment __user *segments,
1030 				unsigned long flags)
1031 {
1032 	struct compat_kexec_segment in;
1033 	struct kexec_segment out, __user *ksegments;
1034 	unsigned long i, result;
1035 
1036 	/* Don't allow clients that don't understand the native
1037 	 * architecture to do anything.
1038 	 */
1039 	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1040 		return -EINVAL;
1041 
1042 	if (nr_segments > KEXEC_SEGMENT_MAX)
1043 		return -EINVAL;
1044 
1045 	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1046 	for (i=0; i < nr_segments; i++) {
1047 		result = copy_from_user(&in, &segments[i], sizeof(in));
1048 		if (result)
1049 			return -EFAULT;
1050 
1051 		out.buf   = compat_ptr(in.buf);
1052 		out.bufsz = in.bufsz;
1053 		out.mem   = in.mem;
1054 		out.memsz = in.memsz;
1055 
1056 		result = copy_to_user(&ksegments[i], &out, sizeof(out));
1057 		if (result)
1058 			return -EFAULT;
1059 	}
1060 
1061 	return sys_kexec_load(entry, nr_segments, ksegments, flags);
1062 }
1063 #endif
1064 
1065 void crash_kexec(struct pt_regs *regs)
1066 {
1067 	/* Take the kexec_mutex here to prevent sys_kexec_load
1068 	 * running on one cpu from replacing the crash kernel
1069 	 * we are using after a panic on a different cpu.
1070 	 *
1071 	 * of memory, the xchg(&kexec_crash_image) would be
1072 	 * of memory the xchg(&kexec_crash_image) would be
1073 	 * sufficient.  But since I reuse the memory...
1074 	 */
1075 	if (mutex_trylock(&kexec_mutex)) {
1076 		if (kexec_crash_image) {
1077 			struct pt_regs fixed_regs;
1078 
1079 			kmsg_dump(KMSG_DUMP_KEXEC);
1080 
1081 			crash_setup_regs(&fixed_regs, regs);
1082 			crash_save_vmcoreinfo();
1083 			machine_crash_shutdown(&fixed_regs);
1084 			machine_kexec(kexec_crash_image);
1085 		}
1086 		mutex_unlock(&kexec_mutex);
1087 	}
1088 }
1089 
1090 size_t crash_get_memory_size(void)
1091 {
1092 	size_t size;
1093 	mutex_lock(&kexec_mutex);
1094 	size = crashk_res.end - crashk_res.start + 1;
1095 	mutex_unlock(&kexec_mutex);
1096 	return size;
1097 }
1098 
1099 static void free_reserved_phys_range(unsigned long begin, unsigned long end)
1100 {
1101 	unsigned long addr;
1102 
1103 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
1104 		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1105 		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1106 		free_page((unsigned long)__va(addr));
1107 		totalram_pages++;
1108 	}
1109 }
1110 
1111 int crash_shrink_memory(unsigned long new_size)
1112 {
1113 	int ret = 0;
1114 	unsigned long start, end;
1115 
1116 	mutex_lock(&kexec_mutex);
1117 
1118 	if (kexec_crash_image) {
1119 		ret = -ENOENT;
1120 		goto unlock;
1121 	}
1122 	start = crashk_res.start;
1123 	end = crashk_res.end;
1124 
1125 	if (new_size >= end - start + 1) {
1126 		ret = -EINVAL;
1127 		if (new_size == end - start + 1)
1128 			ret = 0;
1129 		goto unlock;
1130 	}
1131 
1132 	start = roundup(start, PAGE_SIZE);
1133 	end = roundup(start + new_size, PAGE_SIZE);
1134 
1135 	free_reserved_phys_range(end, crashk_res.end);
1136 
1137 	if (start == end) {
1138 		crashk_res.end = end;
1139 		release_resource(&crashk_res);
1140 	} else
1141 		crashk_res.end = end - 1;
1142 
1143 unlock:
1144 	mutex_unlock(&kexec_mutex);
1145 	return ret;
1146 }
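/*
 * crash_get_memory_size() and crash_shrink_memory() are the backing for
 * the /sys/kernel/kexec_crash_size interface (wired up outside this
 * file), so, for example,
 *
 *	echo 67108864 > /sys/kernel/kexec_crash_size
 *
 * asks for the reserved crash kernel region to be trimmed to 64 MiB,
 * and writing 0 releases the reservation entirely.  Growing the region
 * is rejected by the new_size check above, and shrinking is refused
 * once a crash kernel has been loaded.
 */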
1147 
1148 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1149 			    size_t data_len)
1150 {
1151 	struct elf_note note;
1152 
1153 	note.n_namesz = strlen(name) + 1;
1154 	note.n_descsz = data_len;
1155 	note.n_type   = type;
1156 	memcpy(buf, &note, sizeof(note));
1157 	buf += (sizeof(note) + 3)/4;
1158 	memcpy(buf, name, note.n_namesz);
1159 	buf += (note.n_namesz + 3)/4;
1160 	memcpy(buf, data, note.n_descsz);
1161 	buf += (note.n_descsz + 3)/4;
1162 
1163 	return buf;
1164 }
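/*
 * A concrete layout sketch for the buffer built above (the note name is
 * just an example): appending a note with name "CORE", type NT_PRSTATUS
 * and data_len bytes of payload produces, in 32-bit words,
 *
 *	u32 n_namesz = 5;		strlen("CORE") + 1
 *	u32 n_descsz = data_len;
 *	u32 n_type   = NT_PRSTATUS;
 *	8 bytes holding "CORE\0" (5 bytes used, rounded up to whole words)
 *	data_len bytes of payload, again rounded up to whole words
 *
 * the (x + 3)/4 arithmetic above is what rounds each piece to 32-bit
 * word boundaries.
 */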
1165 
1166 static void final_note(u32 *buf)
1167 {
1168 	struct elf_note note;
1169 
1170 	note.n_namesz = 0;
1171 	note.n_descsz = 0;
1172 	note.n_type   = 0;
1173 	memcpy(buf, &note, sizeof(note));
1174 }
1175 
1176 void crash_save_cpu(struct pt_regs *regs, int cpu)
1177 {
1178 	struct elf_prstatus prstatus;
1179 	u32 *buf;
1180 
1181 	if ((cpu < 0) || (cpu >= nr_cpu_ids))
1182 		return;
1183 
1184 	/* Using ELF notes here is opportunistic.
1185 	 * I need a well defined structure format
1186 	 * for the data I pass, and I need tags
1187 	 * on the data to indicate what information I have
1188 	 * squirrelled away.  ELF notes happen to provide
1189 	 * all of that, so there is no need to invent something new.
1190 	 */
1191 	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1192 	if (!buf)
1193 		return;
1194 	memset(&prstatus, 0, sizeof(prstatus));
1195 	prstatus.pr_pid = current->pid;
1196 	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1197 	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1198 		      	      &prstatus, sizeof(prstatus));
1199 	final_note(buf);
1200 }
1201 
1202 static int __init crash_notes_memory_init(void)
1203 {
1204 	/* Allocate memory for saving cpu registers. */
1205 	crash_notes = alloc_percpu(note_buf_t);
1206 	if (!crash_notes) {
1207 		printk("Kexec: Memory allocation for saving cpu register"
1208 		" states failed\n");
1209 		return -ENOMEM;
1210 	}
1211 	return 0;
1212 }
1213 module_init(crash_notes_memory_init)
1214 
1215 
1216 /*
1217  * parsing the "crashkernel" commandline
1218  *
1219  * this code is intended to be called from architecture specific code
1220  */
1221 
1222 
1223 /*
1224  * This function parses command lines in the format
1225  *
1226  *   crashkernel=ramsize-range:size[,...][@offset]
1227  *
1228  * The function returns 0 on success and -EINVAL on failure.
1229  */
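/*
 * For example (the sizes are only an illustration):
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M of crash kernel memory on systems with at least 512M but
 * less than 2G of RAM, 128M on systems with 2G or more, nothing below
 * 512M, and places the reservation at physical offset 16M whenever one
 * is made.
 */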
1230 static int __init parse_crashkernel_mem(char 			*cmdline,
1231 					unsigned long long	system_ram,
1232 					unsigned long long	*crash_size,
1233 					unsigned long long	*crash_base)
1234 {
1235 	char *cur = cmdline, *tmp;
1236 
1237 	/* for each entry of the comma-separated list */
1238 	do {
1239 		unsigned long long start, end = ULLONG_MAX, size;
1240 
1241 		/* get the start of the range */
1242 		start = memparse(cur, &tmp);
1243 		if (cur == tmp) {
1244 			pr_warning("crashkernel: Memory value expected\n");
1245 			return -EINVAL;
1246 		}
1247 		cur = tmp;
1248 		if (*cur != '-') {
1249 			pr_warning("crashkernel: '-' expected\n");
1250 			return -EINVAL;
1251 		}
1252 		cur++;
1253 
1254 		/* if no ':' is here, then we read the end */
1255 		if (*cur != ':') {
1256 			end = memparse(cur, &tmp);
1257 			if (cur == tmp) {
1258 				pr_warning("crashkernel: Memory "
1259 						"value expected\n");
1260 				return -EINVAL;
1261 			}
1262 			cur = tmp;
1263 			if (end <= start) {
1264 				pr_warning("crashkernel: end <= start\n");
1265 				return -EINVAL;
1266 			}
1267 		}
1268 
1269 		if (*cur != ':') {
1270 			pr_warning("crashkernel: ':' expected\n");
1271 			return -EINVAL;
1272 		}
1273 		cur++;
1274 
1275 		size = memparse(cur, &tmp);
1276 		if (cur == tmp) {
1277 			pr_warning("Memory value expected\n");
1278 			return -EINVAL;
1279 		}
1280 		cur = tmp;
1281 		if (size >= system_ram) {
1282 			pr_warning("crashkernel: invalid size\n");
1283 			return -EINVAL;
1284 		}
1285 
1286 		/* match ? */
1287 		if (system_ram >= start && system_ram < end) {
1288 			*crash_size = size;
1289 			break;
1290 		}
1291 	} while (*cur++ == ',');
1292 
1293 	if (*crash_size > 0) {
1294 		while (*cur && *cur != ' ' && *cur != '@')
1295 			cur++;
1296 		if (*cur == '@') {
1297 			cur++;
1298 			*crash_base = memparse(cur, &tmp);
1299 			if (cur == tmp) {
1300 				pr_warning("Memory value expected "
1301 						"after '@'\n");
1302 				return -EINVAL;
1303 			}
1304 		}
1305 	}
1306 
1307 	return 0;
1308 }
1309 
1310 /*
1311  * This function parses "simple" (old) crashkernel command lines like
1312  *
1313  * 	crashkernel=size[@offset]
1314  *
1315  * It returns 0 on success and -EINVAL on failure.
1316  */
1317 static int __init parse_crashkernel_simple(char 		*cmdline,
1318 					   unsigned long long 	*crash_size,
1319 					   unsigned long long 	*crash_base)
1320 {
1321 	char *cur = cmdline;
1322 
1323 	*crash_size = memparse(cmdline, &cur);
1324 	if (cmdline == cur) {
1325 		pr_warning("crashkernel: memory value expected\n");
1326 		return -EINVAL;
1327 	}
1328 
1329 	if (*cur == '@')
1330 		*crash_base = memparse(cur+1, &cur);
1331 
1332 	return 0;
1333 }
1334 
1335 /*
1336  * This function is the entry point for command line parsing and should be
1337  * called from the arch-specific code.
1338  */
1339 int __init parse_crashkernel(char 		 *cmdline,
1340 			     unsigned long long system_ram,
1341 			     unsigned long long *crash_size,
1342 			     unsigned long long *crash_base)
1343 {
1344 	char 	*p = cmdline, *ck_cmdline = NULL;
1345 	char	*first_colon, *first_space;
1346 
1347 	BUG_ON(!crash_size || !crash_base);
1348 	*crash_size = 0;
1349 	*crash_base = 0;
1350 
1351 	/* find crashkernel and use the last one if there are more */
1352 	p = strstr(p, "crashkernel=");
1353 	while (p) {
1354 		ck_cmdline = p;
1355 		p = strstr(p+1, "crashkernel=");
1356 	}
1357 
1358 	if (!ck_cmdline)
1359 		return -EINVAL;
1360 
1361 	ck_cmdline += 12; /* strlen("crashkernel=") */
1362 
1363 	/*
1364 	 * if the commandline contains a ':', then that's the extended
1365 	 * syntax -- if not, it must be the classic syntax
1366 	 */
1367 	first_colon = strchr(ck_cmdline, ':');
1368 	first_space = strchr(ck_cmdline, ' ');
1369 	if (first_colon && (!first_space || first_colon < first_space))
1370 		return parse_crashkernel_mem(ck_cmdline, system_ram,
1371 				crash_size, crash_base);
1372 	else
1373 		return parse_crashkernel_simple(ck_cmdline, crash_size,
1374 				crash_base);
1375 
1376 	return 0;
1377 }
1378 
1379 
1380 
1381 void crash_save_vmcoreinfo(void)
1382 {
1383 	u32 *buf;
1384 
1385 	if (!vmcoreinfo_size)
1386 		return;
1387 
1388 	vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1389 
1390 	buf = (u32 *)vmcoreinfo_note;
1391 
1392 	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1393 			      vmcoreinfo_size);
1394 
1395 	final_note(buf);
1396 }
1397 
1398 void vmcoreinfo_append_str(const char *fmt, ...)
1399 {
1400 	va_list args;
1401 	char buf[0x50];
1402 	int r;
1403 
1404 	va_start(args, fmt);
1405 	r = vsnprintf(buf, sizeof(buf), fmt, args);
1406 	va_end(args);
1407 
1408 	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1409 		r = vmcoreinfo_max_size - vmcoreinfo_size;
1410 
1411 	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1412 
1413 	vmcoreinfo_size += r;
1414 }
1415 
1416 /*
1417  * provide an empty default implementation here -- architecture
1418  * code may override this
1419  */
1420 void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1421 {}
1422 
1423 unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1424 {
1425 	return __pa((unsigned long)(char *)&vmcoreinfo_note);
1426 }
1427 
1428 static int __init crash_save_vmcoreinfo_init(void)
1429 {
1430 	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1431 	VMCOREINFO_PAGESIZE(PAGE_SIZE);
1432 
1433 	VMCOREINFO_SYMBOL(init_uts_ns);
1434 	VMCOREINFO_SYMBOL(node_online_map);
1435 	VMCOREINFO_SYMBOL(swapper_pg_dir);
1436 	VMCOREINFO_SYMBOL(_stext);
1437 	VMCOREINFO_SYMBOL(vmlist);
1438 
1439 #ifndef CONFIG_NEED_MULTIPLE_NODES
1440 	VMCOREINFO_SYMBOL(mem_map);
1441 	VMCOREINFO_SYMBOL(contig_page_data);
1442 #endif
1443 #ifdef CONFIG_SPARSEMEM
1444 	VMCOREINFO_SYMBOL(mem_section);
1445 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1446 	VMCOREINFO_STRUCT_SIZE(mem_section);
1447 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
1448 #endif
1449 	VMCOREINFO_STRUCT_SIZE(page);
1450 	VMCOREINFO_STRUCT_SIZE(pglist_data);
1451 	VMCOREINFO_STRUCT_SIZE(zone);
1452 	VMCOREINFO_STRUCT_SIZE(free_area);
1453 	VMCOREINFO_STRUCT_SIZE(list_head);
1454 	VMCOREINFO_SIZE(nodemask_t);
1455 	VMCOREINFO_OFFSET(page, flags);
1456 	VMCOREINFO_OFFSET(page, _count);
1457 	VMCOREINFO_OFFSET(page, mapping);
1458 	VMCOREINFO_OFFSET(page, lru);
1459 	VMCOREINFO_OFFSET(pglist_data, node_zones);
1460 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
1461 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1462 	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1463 #endif
1464 	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1465 	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1466 	VMCOREINFO_OFFSET(pglist_data, node_id);
1467 	VMCOREINFO_OFFSET(zone, free_area);
1468 	VMCOREINFO_OFFSET(zone, vm_stat);
1469 	VMCOREINFO_OFFSET(zone, spanned_pages);
1470 	VMCOREINFO_OFFSET(free_area, free_list);
1471 	VMCOREINFO_OFFSET(list_head, next);
1472 	VMCOREINFO_OFFSET(list_head, prev);
1473 	VMCOREINFO_OFFSET(vm_struct, addr);
1474 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1475 	log_buf_kexec_setup();
1476 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1477 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
1478 	VMCOREINFO_NUMBER(PG_lru);
1479 	VMCOREINFO_NUMBER(PG_private);
1480 	VMCOREINFO_NUMBER(PG_swapcache);
1481 
1482 	arch_crash_save_vmcoreinfo();
1483 
1484 	return 0;
1485 }
1486 
1487 module_init(crash_save_vmcoreinfo_init)
1488 
1489 /*
1490  * Move into place and start executing a preloaded standalone
1491  * executable.  If nothing was preloaded return an error.
1492  */
1493 int kernel_kexec(void)
1494 {
1495 	int error = 0;
1496 
1497 	if (!mutex_trylock(&kexec_mutex))
1498 		return -EBUSY;
1499 	if (!kexec_image) {
1500 		error = -EINVAL;
1501 		goto Unlock;
1502 	}
1503 
1504 #ifdef CONFIG_KEXEC_JUMP
1505 	if (kexec_image->preserve_context) {
1506 		mutex_lock(&pm_mutex);
1507 		pm_prepare_console();
1508 		error = freeze_processes();
1509 		if (error) {
1510 			error = -EBUSY;
1511 			goto Restore_console;
1512 		}
1513 		suspend_console();
1514 		error = dpm_suspend_start(PMSG_FREEZE);
1515 		if (error)
1516 			goto Resume_console;
1517 		/* At this point, dpm_suspend_start() has been called,
1518 		 * but *not* dpm_suspend_noirq(). We *must* call
1519 		 * dpm_suspend_noirq() now.  Otherwise, drivers for
1520 		 * some devices (e.g. interrupt controllers) become
1521 		 * desynchronized with the actual state of the
1522 		 * hardware at resume time, and evil weirdness ensues.
1523 		 */
1524 		error = dpm_suspend_noirq(PMSG_FREEZE);
1525 		if (error)
1526 			goto Resume_devices;
1527 		error = disable_nonboot_cpus();
1528 		if (error)
1529 			goto Enable_cpus;
1530 		local_irq_disable();
1531 		/* Suspend system devices */
1532 		error = sysdev_suspend(PMSG_FREEZE);
1533 		if (error)
1534 			goto Enable_irqs;
1535 	} else
1536 #endif
1537 	{
1538 		kernel_restart_prepare(NULL);
1539 		printk(KERN_EMERG "Starting new kernel\n");
1540 		machine_shutdown();
1541 	}
1542 
1543 	machine_kexec(kexec_image);
1544 
1545 #ifdef CONFIG_KEXEC_JUMP
1546 	if (kexec_image->preserve_context) {
1547 		sysdev_resume();
1548  Enable_irqs:
1549 		local_irq_enable();
1550  Enable_cpus:
1551 		enable_nonboot_cpus();
1552 		dpm_resume_noirq(PMSG_RESTORE);
1553  Resume_devices:
1554 		dpm_resume_end(PMSG_RESTORE);
1555  Resume_console:
1556 		resume_console();
1557 		thaw_processes();
1558  Restore_console:
1559 		pm_restore_console();
1560 		mutex_unlock(&pm_mutex);
1561 	}
1562 #endif
1563 
1564  Unlock:
1565 	mutex_unlock(&kexec_mutex);
1566 	return error;
1567 }
1568