xref: /openbmc/linux/kernel/kexec.c (revision 62e7ca52)
1 /*
2  * kexec.c - kexec system call
3  * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
4  *
5  * This source code is licensed under the GNU General Public License,
6  * Version 2.  See the file COPYING for more details.
7  */
8 
9 #include <linux/capability.h>
10 #include <linux/mm.h>
11 #include <linux/file.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14 #include <linux/kexec.h>
15 #include <linux/mutex.h>
16 #include <linux/list.h>
17 #include <linux/highmem.h>
18 #include <linux/syscalls.h>
19 #include <linux/reboot.h>
20 #include <linux/ioport.h>
21 #include <linux/hardirq.h>
22 #include <linux/elf.h>
23 #include <linux/elfcore.h>
24 #include <linux/utsname.h>
25 #include <linux/numa.h>
26 #include <linux/suspend.h>
27 #include <linux/device.h>
28 #include <linux/freezer.h>
29 #include <linux/pm.h>
30 #include <linux/cpu.h>
31 #include <linux/console.h>
32 #include <linux/vmalloc.h>
33 #include <linux/swap.h>
34 #include <linux/syscore_ops.h>
35 #include <linux/compiler.h>
36 #include <linux/hugetlb.h>
37 
38 #include <asm/page.h>
39 #include <asm/uaccess.h>
40 #include <asm/io.h>
41 #include <asm/sections.h>
42 
43 /* Per cpu memory for storing cpu states in case of system crash. */
44 note_buf_t __percpu *crash_notes;
45 
46 /* vmcoreinfo stuff */
47 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
48 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
49 size_t vmcoreinfo_size;
50 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
51 
52 /* Flag to indicate we are going to kexec a new kernel */
53 bool kexec_in_progress = false;
54 
55 /* Location of the reserved area for the crash kernel */
56 struct resource crashk_res = {
57 	.name  = "Crash kernel",
58 	.start = 0,
59 	.end   = 0,
60 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
61 };
62 struct resource crashk_low_res = {
63 	.name  = "Crash kernel",
64 	.start = 0,
65 	.end   = 0,
66 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
67 };
68 
69 int kexec_should_crash(struct task_struct *p)
70 {
71 	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
72 		return 1;
73 	return 0;
74 }
75 
76 /*
77  * When kexec transitions to the new kernel there is a one-to-one
78  * mapping between physical and virtual addresses.  On processors
79  * where you can disable the MMU this is trivial, and easy.  For
80  * others it is still a simple predictable page table to setup.
81  *
82  * In that environment kexec copies the new kernel to its final
83  * resting place.  This means I can only support memory whose
84  * physical address can fit in an unsigned long.  In particular
85  * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
86  * If the assembly stub has more restrictive requirements
87  * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
88  * defined more restrictively in <asm/kexec.h>.
89  *
90  * The code for the transition from the current kernel to the
91  * new kernel is placed in the control_code_buffer, whose size
92  * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
93  * page of memory is necessary, but some architectures require more.
94  * Because this memory must be identity mapped in the transition from
95  * virtual to physical addresses it must live in the range
96  * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
97  * modifiable.
98  *
99  * The assembly stub in the control code buffer is passed a linked list
100  * of descriptor pages detailing the source pages of the new kernel,
101  * and the destination addresses of those source pages.  As this data
102  * structure is not used in the context of the current OS, it must
103  * be self-contained.
104  *
105  * The code has been made to work with highmem pages and will use a
106  * destination page in its final resting place (if it happens
107  * to allocate it).  The end product of this is that most of the
108  * physical address space, and most of RAM can be used.
109  *
110  * Future directions include:
111  *  - allocating a page table with the control code buffer identity
112  *    mapped, to simplify machine_kexec and make kexec_on_panic more
113  *    reliable.
114  */
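/*
 * A sketch of the descriptor list the assembly stub consumes (see
 * kimage_add_entry() and friends below): each kimage_entry_t is a
 * physical address with a flag OR'ed into the low bits.
 *
 *	dest | IND_DESTINATION   set the running destination address
 *	src  | IND_SOURCE        copy this page there, advance by PAGE_SIZE
 *	next | IND_INDIRECTION   continue reading entries from this page
 *	IND_DONE                 end of the list
 */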
115 
116 /*
117  * KIMAGE_NO_DEST is an impossible destination address, used for
118  * allocating pages whose destination address we do not care about.
119  */
120 #define KIMAGE_NO_DEST (-1UL)
121 
122 static int kimage_is_destination_range(struct kimage *image,
123 				       unsigned long start, unsigned long end);
124 static struct page *kimage_alloc_page(struct kimage *image,
125 				       gfp_t gfp_mask,
126 				       unsigned long dest);
127 
128 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
129 			   unsigned long nr_segments,
130 			   struct kexec_segment __user *segments)
131 {
132 	size_t segment_bytes;
133 	struct kimage *image;
134 	unsigned long i;
135 	int result;
136 
137 	/* Allocate a controlling structure */
138 	result = -ENOMEM;
139 	image = kzalloc(sizeof(*image), GFP_KERNEL);
140 	if (!image)
141 		goto out;
142 
143 	image->head = 0;
144 	image->entry = &image->head;
145 	image->last_entry = &image->head;
146 	image->control_page = ~0; /* By default this does not apply */
147 	image->start = entry;
148 	image->type = KEXEC_TYPE_DEFAULT;
149 
150 	/* Initialize the list of control pages */
151 	INIT_LIST_HEAD(&image->control_pages);
152 
153 	/* Initialize the list of destination pages */
154 	INIT_LIST_HEAD(&image->dest_pages);
155 
156 	/* Initialize the list of unusable pages */
157 	INIT_LIST_HEAD(&image->unuseable_pages);
158 
159 	/* Read in the segments */
160 	image->nr_segments = nr_segments;
161 	segment_bytes = nr_segments * sizeof(*segments);
162 	result = copy_from_user(image->segment, segments, segment_bytes);
163 	if (result) {
164 		result = -EFAULT;
165 		goto out;
166 	}
167 
168 	/*
169 	 * Verify we have good destination addresses.  The caller is
170 	 * responsible for making certain we don't attempt to load
171 	 * the new image into invalid or reserved areas of RAM.  This
172 	 * just verifies it is an address we can use.
173 	 *
174 	 * Since the kernel does everything in page size chunks ensure
175 	 * the destination addresses are page aligned.  Too many
176  * special cases crop up when we don't do this.  The most
177 	 * insidious is getting overlapping destination addresses
178 	 * simply because addresses are changed to page size
179 	 * granularity.
180 	 */
181 	result = -EADDRNOTAVAIL;
182 	for (i = 0; i < nr_segments; i++) {
183 		unsigned long mstart, mend;
184 
185 		mstart = image->segment[i].mem;
186 		mend   = mstart + image->segment[i].memsz;
187 		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
188 			goto out;
189 		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
190 			goto out;
191 	}
192 
193 	/* Verify our destination addresses do not overlap.
194 	 * If we allowed overlapping destination addresses
195 	 * through, very weird things could happen with no
196 	 * easy explanation as one segment stomps on another.
197 	 */
198 	result = -EINVAL;
199 	for (i = 0; i < nr_segments; i++) {
200 		unsigned long mstart, mend;
201 		unsigned long j;
202 
203 		mstart = image->segment[i].mem;
204 		mend   = mstart + image->segment[i].memsz;
205 		for (j = 0; j < i; j++) {
206 			unsigned long pstart, pend;
207 			pstart = image->segment[j].mem;
208 			pend   = pstart + image->segment[j].memsz;
209 			/* Do the segments overlap ? */
210 			if ((mend > pstart) && (mstart < pend))
211 				goto out;
212 		}
213 	}
214 
215 	/* Ensure our buffer sizes are no larger than
216 	 * our memory sizes.  This should always be the case,
217 	 * and it is easier to check up front than to be surprised
218 	 * later on.
219 	 */
220 	result = -EINVAL;
221 	for (i = 0; i < nr_segments; i++) {
222 		if (image->segment[i].bufsz > image->segment[i].memsz)
223 			goto out;
224 	}
225 
226 	result = 0;
227 out:
228 	if (result == 0)
229 		*rimage = image;
230 	else
231 		kfree(image);
232 
233 	return result;
234 
235 }
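/*
 * Purely illustrative (not part of this file's build): a segment table
 * that passes the checks above -- page-aligned addresses, no overlaps,
 * and bufsz <= memsz.  All addresses and buffer names are hypothetical.
 */
#if 0
static struct kexec_segment example_segments[] = {
	{ .buf = kernel_buf, .bufsz = 0x200000,		/* bufsz <= memsz */
	  .mem = 0x01000000, .memsz = 0x400000 },	/* page aligned */
	{ .buf = initrd_buf, .bufsz = 0x100000,		/* does not overlap */
	  .mem = 0x02000000, .memsz = 0x100000 },	/* the first segment */
};
#endif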
236 
237 static void kimage_free_page_list(struct list_head *list);
238 
239 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
240 				unsigned long nr_segments,
241 				struct kexec_segment __user *segments)
242 {
243 	int result;
244 	struct kimage *image;
245 
246 	/* Allocate and initialize a controlling structure */
247 	image = NULL;
248 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
249 	if (result)
250 		goto out;
251 
252 	/*
253 	 * Find a location for the control code buffer, and add it to
254 	 * the vector of segments so that its pages will also be
255 	 * counted as destination pages.
256 	 */
257 	result = -ENOMEM;
258 	image->control_code_page = kimage_alloc_control_pages(image,
259 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
260 	if (!image->control_code_page) {
261 		pr_err("Could not allocate control_code_buffer\n");
262 		goto out_free;
263 	}
264 
265 	image->swap_page = kimage_alloc_control_pages(image, 0);
266 	if (!image->swap_page) {
267 		pr_err("Could not allocate swap buffer\n");
268 		goto out_free;
269 	}
270 
271 	*rimage = image;
272 	return 0;
273 
274 out_free:
275 	kimage_free_page_list(&image->control_pages);
276 	kfree(image);
277 out:
278 	return result;
279 }
280 
281 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
282 				unsigned long nr_segments,
283 				struct kexec_segment __user *segments)
284 {
285 	int result;
286 	struct kimage *image;
287 	unsigned long i;
288 
289 	image = NULL;
290 	/* Verify we have a valid entry point */
291 	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
292 		result = -EADDRNOTAVAIL;
293 		goto out;
294 	}
295 
296 	/* Allocate and initialize a controlling structure */
297 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
298 	if (result)
299 		goto out;
300 
301 	/* Enable the special crash kernel control page
302 	 * allocation policy.
303 	 */
304 	image->control_page = crashk_res.start;
305 	image->type = KEXEC_TYPE_CRASH;
306 
307 	/*
308 	 * Verify we have good destination addresses.  Normally
309 	 * the caller is responsible for making certain we don't
310 	 * attempt to load the new image into invalid or reserved
311 	 * areas of RAM.  But crash kernels are preloaded into a
312 	 * reserved area of ram.  We must ensure the addresses
313 	 * are in the reserved area otherwise preloading the
314 	 * kernel could corrupt things.
315 	 */
316 	result = -EADDRNOTAVAIL;
317 	for (i = 0; i < nr_segments; i++) {
318 		unsigned long mstart, mend;
319 
320 		mstart = image->segment[i].mem;
321 		mend = mstart + image->segment[i].memsz - 1;
322 		/* Ensure we are within the crash kernel limits */
323 		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
324 			goto out_free;
325 	}
326 
327 	/*
328 	 * Find a location for the control code buffer, and add it to
329 	 * the vector of segments so that its pages will also be
330 	 * counted as destination pages.
331 	 */
332 	result = -ENOMEM;
333 	image->control_code_page = kimage_alloc_control_pages(image,
334 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
335 	if (!image->control_code_page) {
336 		pr_err("Could not allocate control_code_buffer\n");
337 		goto out_free;
338 	}
339 
340 	*rimage = image;
341 	return 0;
342 
343 out_free:
344 	kfree(image);
345 out:
346 	return result;
347 }
348 
349 static int kimage_is_destination_range(struct kimage *image,
350 					unsigned long start,
351 					unsigned long end)
352 {
353 	unsigned long i;
354 
355 	for (i = 0; i < image->nr_segments; i++) {
356 		unsigned long mstart, mend;
357 
358 		mstart = image->segment[i].mem;
359 		mend = mstart + image->segment[i].memsz;
360 		if ((end > mstart) && (start < mend))
361 			return 1;
362 	}
363 
364 	return 0;
365 }
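/*
 * Worked example of the half-open overlap test above: with a segment
 * occupying [0x1000, 0x3000), a query of [0x3000, 0x4000) does not
 * overlap (start < mend fails, since 0x3000 < 0x3000 is false), while
 * [0x2000, 0x4000) does (0x4000 > 0x1000 and 0x2000 < 0x3000).
 */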
366 
367 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
368 {
369 	struct page *pages;
370 
371 	pages = alloc_pages(gfp_mask, order);
372 	if (pages) {
373 		unsigned int count, i;
374 		pages->mapping = NULL;
375 		set_page_private(pages, order);
376 		count = 1 << order;
377 		for (i = 0; i < count; i++)
378 			SetPageReserved(pages + i);
379 	}
380 
381 	return pages;
382 }
383 
384 static void kimage_free_pages(struct page *page)
385 {
386 	unsigned int order, count, i;
387 
388 	order = page_private(page);
389 	count = 1 << order;
390 	for (i = 0; i < count; i++)
391 		ClearPageReserved(page + i);
392 	__free_pages(page, order);
393 }
394 
395 static void kimage_free_page_list(struct list_head *list)
396 {
397 	struct list_head *pos, *next;
398 
399 	list_for_each_safe(pos, next, list) {
400 		struct page *page;
401 
402 		page = list_entry(pos, struct page, lru);
403 		list_del(&page->lru);
404 		kimage_free_pages(page);
405 	}
406 }
407 
408 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
409 							unsigned int order)
410 {
411 	/* Control pages are special: they are the intermediaries
412 	 * that are needed while we copy the rest of the pages
413 	 * to their final resting place.  As such they must
414 	 * not conflict with either the destination addresses
415 	 * or memory the kernel is already using.
416 	 *
417 	 * The only case where we really need more than one of
418 	 * these is for architectures where we cannot disable
419 	 * the MMU and must instead generate an identity mapped
420 	 * page table for all of the memory.
421 	 *
422 	 * At worst this runs in O(N) of the image size.
423 	 */
424 	struct list_head extra_pages;
425 	struct page *pages;
426 	unsigned int count;
427 
428 	count = 1 << order;
429 	INIT_LIST_HEAD(&extra_pages);
430 
431 	/* Loop while I can allocate a page and the page allocated
432 	 * is a destination page.
433 	 */
434 	do {
435 		unsigned long pfn, epfn, addr, eaddr;
436 
437 		pages = kimage_alloc_pages(GFP_KERNEL, order);
438 		if (!pages)
439 			break;
440 		pfn   = page_to_pfn(pages);
441 		epfn  = pfn + count;
442 		addr  = pfn << PAGE_SHIFT;
443 		eaddr = epfn << PAGE_SHIFT;
444 		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
445 			      kimage_is_destination_range(image, addr, eaddr)) {
446 			list_add(&pages->lru, &extra_pages);
447 			pages = NULL;
448 		}
449 	} while (!pages);
450 
451 	if (pages) {
452 		/* Remember the allocated page... */
453 		list_add(&pages->lru, &image->control_pages);
454 
455 		/* Because the page is already in its destination
456 		 * location we will never allocate another page at
457 		 * that address.  Therefore kimage_alloc_pages
458 		 * will not return it (again) and we don't need
459 		 * to give it an entry in image->segment[].
460 		 */
461 	}
462 	/* Deal with the destination pages I have inadvertently allocated.
463 	 *
464 	 * Ideally I would convert multi-page allocations into single
465 	 * page allocations, and add everything to image->dest_pages.
466 	 *
467 	 * For now it is simpler to just free the pages.
468 	 */
469 	kimage_free_page_list(&extra_pages);
470 
471 	return pages;
472 }
473 
474 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
475 						      unsigned int order)
476 {
477 	/* Control pages are special: they are the intermediaries
478 	 * that are needed while we copy the rest of the pages
479 	 * to their final resting place.  As such they must
480 	 * not conflict with either the destination addresses
481 	 * or memory the kernel is already using.
482 	 *
483 	 * Control pages are also the only pages we must allocate
484 	 * when loading a crash kernel.  All of the other pages
485 	 * are specified by the segments and we just memcpy
486 	 * into them directly.
487 	 *
488 	 * The only case where we really need more than one of
489 	 * these is for architectures where we cannot disable
490 	 * the MMU and must instead generate an identity mapped
491 	 * page table for all of the memory.
492 	 *
493 	 * Given the low demand this implements a very simple
494 	 * allocator that finds the first hole of the appropriate
495 	 * size in the reserved memory region, and allocates all
496 	 * of the memory up to and including the hole.
497 	 */
498 	unsigned long hole_start, hole_end, size;
499 	struct page *pages;
500 
501 	pages = NULL;
502 	size = (1 << order) << PAGE_SHIFT;
503 	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
504 	hole_end   = hole_start + size - 1;
505 	while (hole_end <= crashk_res.end) {
506 		unsigned long i;
507 
508 		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
509 			break;
510 		/* See if I overlap any of the segments */
511 		for (i = 0; i < image->nr_segments; i++) {
512 			unsigned long mstart, mend;
513 
514 			mstart = image->segment[i].mem;
515 			mend   = mstart + image->segment[i].memsz - 1;
516 			if ((hole_end >= mstart) && (hole_start <= mend)) {
517 				/* Advance the hole to the end of the segment */
518 				hole_start = (mend + (size - 1)) & ~(size - 1);
519 				hole_end   = hole_start + size - 1;
520 				break;
521 			}
522 		}
523 		/* If I don't overlap any segments I have found my hole! */
524 		if (i == image->nr_segments) {
525 			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
526 			break;
527 		}
528 	}
529 	if (pages)
530 		image->control_page = hole_end;
531 
532 	return pages;
533 }
534 
535 
536 struct page *kimage_alloc_control_pages(struct kimage *image,
537 					 unsigned int order)
538 {
539 	struct page *pages = NULL;
540 
541 	switch (image->type) {
542 	case KEXEC_TYPE_DEFAULT:
543 		pages = kimage_alloc_normal_control_pages(image, order);
544 		break;
545 	case KEXEC_TYPE_CRASH:
546 		pages = kimage_alloc_crash_control_pages(image, order);
547 		break;
548 	}
549 
550 	return pages;
551 }
552 
553 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
554 {
555 	if (*image->entry != 0)
556 		image->entry++;
557 
558 	if (image->entry == image->last_entry) {
559 		kimage_entry_t *ind_page;
560 		struct page *page;
561 
562 		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
563 		if (!page)
564 			return -ENOMEM;
565 
566 		ind_page = page_address(page);
567 		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
568 		image->entry = ind_page;
569 		image->last_entry = ind_page +
570 				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
571 	}
572 	*image->entry = entry;
573 	image->entry++;
574 	*image->entry = 0;
575 
576 	return 0;
577 }
578 
579 static int kimage_set_destination(struct kimage *image,
580 				   unsigned long destination)
581 {
582 	int result;
583 
584 	destination &= PAGE_MASK;
585 	result = kimage_add_entry(image, destination | IND_DESTINATION);
586 	if (result == 0)
587 		image->destination = destination;
588 
589 	return result;
590 }
591 
592 
593 static int kimage_add_page(struct kimage *image, unsigned long page)
594 {
595 	int result;
596 
597 	page &= PAGE_MASK;
598 	result = kimage_add_entry(image, page | IND_SOURCE);
599 	if (result == 0)
600 		image->destination += PAGE_SIZE;
601 
602 	return result;
603 }
604 
605 
606 static void kimage_free_extra_pages(struct kimage *image)
607 {
608 	/* Walk through and free any extra destination pages I may have */
609 	kimage_free_page_list(&image->dest_pages);
610 
611 	/* Walk through and free any unusable pages I have cached */
612 	kimage_free_page_list(&image->unuseable_pages);
613 }
614 
615 static void kimage_terminate(struct kimage *image)
616 {
617 	if (*image->entry != 0)
618 		image->entry++;
619 
620 	*image->entry = IND_DONE;
621 }
622 
623 #define for_each_kimage_entry(image, ptr, entry) \
624 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
625 		ptr = (entry & IND_INDIRECTION) ? \
626 			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
627 
628 static void kimage_free_entry(kimage_entry_t entry)
629 {
630 	struct page *page;
631 
632 	page = pfn_to_page(entry >> PAGE_SHIFT);
633 	kimage_free_pages(page);
634 }
635 
636 static void kimage_free(struct kimage *image)
637 {
638 	kimage_entry_t *ptr, entry;
639 	kimage_entry_t ind = 0;
640 
641 	if (!image)
642 		return;
643 
644 	kimage_free_extra_pages(image);
645 	for_each_kimage_entry(image, ptr, entry) {
646 		if (entry & IND_INDIRECTION) {
647 			/* Free the previous indirection page */
648 			if (ind & IND_INDIRECTION)
649 				kimage_free_entry(ind);
650 			/* Save this indirection page until we are
651 			 * done with it.
652 			 */
653 			ind = entry;
654 		} else if (entry & IND_SOURCE)
655 			kimage_free_entry(entry);
656 	}
657 	/* Free the final indirection page */
658 	if (ind & IND_INDIRECTION)
659 		kimage_free_entry(ind);
660 
661 	/* Handle any machine specific cleanup */
662 	machine_kexec_cleanup(image);
663 
664 	/* Free the kexec control pages... */
665 	kimage_free_page_list(&image->control_pages);
666 	kfree(image);
667 }
668 
669 static kimage_entry_t *kimage_dst_used(struct kimage *image,
670 					unsigned long page)
671 {
672 	kimage_entry_t *ptr, entry;
673 	unsigned long destination = 0;
674 
675 	for_each_kimage_entry(image, ptr, entry) {
676 		if (entry & IND_DESTINATION)
677 			destination = entry & PAGE_MASK;
678 		else if (entry & IND_SOURCE) {
679 			if (page == destination)
680 				return ptr;
681 			destination += PAGE_SIZE;
682 		}
683 	}
684 
685 	return NULL;
686 }
687 
688 static struct page *kimage_alloc_page(struct kimage *image,
689 					gfp_t gfp_mask,
690 					unsigned long destination)
691 {
692 	/*
693 	 * Here we implement safeguards to ensure that a source page
694 	 * is not copied to its destination page before the data on
695 	 * the destination page is no longer useful.
696 	 *
697 	 * To do this we maintain the invariant that a source page is
698 	 * either its own destination page, or it is not a
699 	 * destination page at all.
700 	 *
701 	 * That is slightly stronger than required, but the proof
702 	 * that no problems will occur is trivial, and the
703 	 * implementation is simple to verify.
704 	 *
705 	 * When allocating all pages normally this algorithm will run
706 	 * in O(N) time, but in the worst case it will run in O(N^2)
707 	 * time.   If the runtime is a problem the data structures can
708 	 * be fixed.
709 	 */
710 	struct page *page;
711 	unsigned long addr;
712 
713 	/*
714 	 * Walk through the list of destination pages, and see if I
715 	 * have a match.
716 	 */
717 	list_for_each_entry(page, &image->dest_pages, lru) {
718 		addr = page_to_pfn(page) << PAGE_SHIFT;
719 		if (addr == destination) {
720 			list_del(&page->lru);
721 			return page;
722 		}
723 	}
724 	page = NULL;
725 	while (1) {
726 		kimage_entry_t *old;
727 
728 		/* Allocate a page; if we run out of memory, give up */
729 		page = kimage_alloc_pages(gfp_mask, 0);
730 		if (!page)
731 			return NULL;
732 		/* If the page cannot be used, file it away */
733 		if (page_to_pfn(page) >
734 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
735 			list_add(&page->lru, &image->unuseable_pages);
736 			continue;
737 		}
738 		addr = page_to_pfn(page) << PAGE_SHIFT;
739 
740 		/* If it is the destination page we want, use it */
741 		if (addr == destination)
742 			break;
743 
744 		/* If the page is not a destination page, use it */
745 		if (!kimage_is_destination_range(image, addr,
746 						  addr + PAGE_SIZE))
747 			break;
748 
749 		/*
750 		 * I know that the page is someone's destination page.
751 		 * See if there is already a source page for this
752 		 * destination page.  And if so swap the source pages.
753 		 */
754 		old = kimage_dst_used(image, addr);
755 		if (old) {
756 			/* If so move it */
757 			unsigned long old_addr;
758 			struct page *old_page;
759 
760 			old_addr = *old & PAGE_MASK;
761 			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
762 			copy_highpage(page, old_page);
763 			*old = addr | (*old & ~PAGE_MASK);
764 
765 			/* The old page I have found cannot be a
766 			 * destination page, so return it if its
767 			 * gfp_flags honor the ones passed in.
768 			 */
769 			if (!(gfp_mask & __GFP_HIGHMEM) &&
770 			    PageHighMem(old_page)) {
771 				kimage_free_pages(old_page);
772 				continue;
773 			}
774 			addr = old_addr;
775 			page = old_page;
776 			break;
777 		} else {
778 			/* Place the page on the destination list; I
779 			 * will use it later.
780 			 */
781 			list_add(&page->lru, &image->dest_pages);
782 		}
783 	}
784 
785 	return page;
786 }
787 
788 static int kimage_load_normal_segment(struct kimage *image,
789 					 struct kexec_segment *segment)
790 {
791 	unsigned long maddr;
792 	size_t ubytes, mbytes;
793 	int result;
794 	unsigned char __user *buf;
795 
796 	result = 0;
797 	buf = segment->buf;
798 	ubytes = segment->bufsz;
799 	mbytes = segment->memsz;
800 	maddr = segment->mem;
801 
802 	result = kimage_set_destination(image, maddr);
803 	if (result < 0)
804 		goto out;
805 
806 	while (mbytes) {
807 		struct page *page;
808 		char *ptr;
809 		size_t uchunk, mchunk;
810 
811 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
812 		if (!page) {
813 			result  = -ENOMEM;
814 			goto out;
815 		}
816 		result = kimage_add_page(image, page_to_pfn(page)
817 								<< PAGE_SHIFT);
818 		if (result < 0)
819 			goto out;
820 
821 		ptr = kmap(page);
822 		/* Start with a clear page */
823 		clear_page(ptr);
824 		ptr += maddr & ~PAGE_MASK;
825 		mchunk = min_t(size_t, mbytes,
826 				PAGE_SIZE - (maddr & ~PAGE_MASK));
827 		uchunk = min(ubytes, mchunk);
828 
829 		result = copy_from_user(ptr, buf, uchunk);
830 		kunmap(page);
831 		if (result) {
832 			result = -EFAULT;
833 			goto out;
834 		}
835 		ubytes -= uchunk;
836 		maddr  += mchunk;
837 		buf    += mchunk;
838 		mbytes -= mchunk;
839 	}
840 out:
841 	return result;
842 }
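/*
 * Worked example of the chunking above (4K pages): with maddr = 0x20c00,
 * mbytes = 0x2000 and ubytes = 0x1800, the loop copies
 *
 *	pass 1: mchunk = 0x400  (up to the page boundary), uchunk = 0x400
 *	pass 2: mchunk = 0x1000 (a whole page),            uchunk = 0x1000
 *	pass 3: mchunk = 0xc00,                            uchunk = 0x400
 *
 * so the final 0x800 bytes of the segment keep the zeroes written by
 * clear_page() instead of user data.
 */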
843 
844 static int kimage_load_crash_segment(struct kimage *image,
845 					struct kexec_segment *segment)
846 {
847 	/* For crash dump kernels we simply copy the data from
848 	 * user space to its destination.
849 	 * We do things a page at a time for the sake of kmap.
850 	 */
851 	unsigned long maddr;
852 	size_t ubytes, mbytes;
853 	int result;
854 	unsigned char __user *buf;
855 
856 	result = 0;
857 	buf = segment->buf;
858 	ubytes = segment->bufsz;
859 	mbytes = segment->memsz;
860 	maddr = segment->mem;
861 	while (mbytes) {
862 		struct page *page;
863 		char *ptr;
864 		size_t uchunk, mchunk;
865 
866 		page = pfn_to_page(maddr >> PAGE_SHIFT);
867 		if (!page) {
868 			result  = -ENOMEM;
869 			goto out;
870 		}
871 		ptr = kmap(page);
872 		ptr += maddr & ~PAGE_MASK;
873 		mchunk = min_t(size_t, mbytes,
874 				PAGE_SIZE - (maddr & ~PAGE_MASK));
875 		uchunk = min(ubytes, mchunk);
876 		if (mchunk > uchunk) {
877 			/* Zero the trailing part of the page */
878 			memset(ptr + uchunk, 0, mchunk - uchunk);
879 		}
880 		result = copy_from_user(ptr, buf, uchunk);
881 		kexec_flush_icache_page(page);
882 		kunmap(page);
883 		if (result) {
884 			result = -EFAULT;
885 			goto out;
886 		}
887 		ubytes -= uchunk;
888 		maddr  += mchunk;
889 		buf    += mchunk;
890 		mbytes -= mchunk;
891 	}
892 out:
893 	return result;
894 }
895 
896 static int kimage_load_segment(struct kimage *image,
897 				struct kexec_segment *segment)
898 {
899 	int result = -ENOMEM;
900 
901 	switch (image->type) {
902 	case KEXEC_TYPE_DEFAULT:
903 		result = kimage_load_normal_segment(image, segment);
904 		break;
905 	case KEXEC_TYPE_CRASH:
906 		result = kimage_load_crash_segment(image, segment);
907 		break;
908 	}
909 
910 	return result;
911 }
912 
913 /*
914  * Exec Kernel system call: for obvious reasons only root may call it.
915  *
916  * This call breaks up into three pieces.
917  * - A generic part which loads the new kernel from the current
918  *   address space, and very carefully places the data in the
919  *   allocated pages.
920  *
921  * - A generic part that interacts with the kernel and tells all of
922  *   the devices to shut down.  Preventing on-going dmas, and placing
923  *   the devices in a consistent state so a later kernel can
924  *   reinitialize them.
925  *
926  * - A machine specific part that includes the syscall number
927  *   and then copies the image to its final destination, and
928  *   jumps into the image at entry.
929  *
930  * kexec does not sync or unmount filesystems, so if you need
931  * that to happen you must do it yourself.
932  */
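/*
 * A minimal userspace sketch of driving this syscall (illustrative
 * only; real loaders such as kexec-tools also prepare purgatory code
 * and boot metadata, and the load address here is hypothetical).
 * glibc provides no wrapper for kexec_load, hence syscall(2).
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

static long load_example(void *kbuf, size_t klen, unsigned long entry)
{
	struct kexec_segment seg = {
		.buf   = kbuf,			/* image bytes in our space */
		.bufsz = klen,
		.mem   = (void *)0x100000,	/* page-aligned destination */
		.memsz = (klen + 4095) & ~4095UL, /* assumes 4K pages */
	};

	/* Requires CAP_SYS_BOOT; see the capable() check below. */
	return syscall(SYS_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_DEFAULT);
}
#endif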
933 struct kimage *kexec_image;
934 struct kimage *kexec_crash_image;
935 int kexec_load_disabled;
936 
937 static DEFINE_MUTEX(kexec_mutex);
938 
939 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
940 		struct kexec_segment __user *, segments, unsigned long, flags)
941 {
942 	struct kimage **dest_image, *image;
943 	int result;
944 
945 	/* We only trust the superuser with rebooting the system. */
946 	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
947 		return -EPERM;
948 
949 	/*
950 	 * Verify we have a legal set of flags.
951 	 * This leaves us room for future extensions.
952 	 */
953 	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
954 		return -EINVAL;
955 
956 	/* Verify we are on the appropriate architecture */
957 	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
958 		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
959 		return -EINVAL;
960 
961 	/* Put an artificial cap on the number
962 	 * of segments passed to kexec_load.
963 	 */
964 	if (nr_segments > KEXEC_SEGMENT_MAX)
965 		return -EINVAL;
966 
967 	image = NULL;
968 	result = 0;
969 
970 	/* Because we write directly to the reserved memory
971 	 * region when loading crash kernels we need a mutex here to
972 	 * prevent multiple crash kernels from attempting to load
973 	 * simultaneously, and to prevent a crash kernel from loading
974 	 * over the top of an in-use crash kernel.
975 	 *
976 	 * KISS: always take the mutex.
977 	 */
978 	if (!mutex_trylock(&kexec_mutex))
979 		return -EBUSY;
980 
981 	dest_image = &kexec_image;
982 	if (flags & KEXEC_ON_CRASH)
983 		dest_image = &kexec_crash_image;
984 	if (nr_segments > 0) {
985 		unsigned long i;
986 
987 		/* Loading another kernel to reboot into */
988 		if ((flags & KEXEC_ON_CRASH) == 0)
989 			result = kimage_normal_alloc(&image, entry,
990 							nr_segments, segments);
991 		/* Loading another kernel to switch to if this one crashes */
992 		else if (flags & KEXEC_ON_CRASH) {
993 			/* Free any current crash dump kernel before
994 			 * we corrupt it.
995 			 */
996 			kimage_free(xchg(&kexec_crash_image, NULL));
997 			result = kimage_crash_alloc(&image, entry,
998 						     nr_segments, segments);
999 			crash_map_reserved_pages();
1000 		}
1001 		if (result)
1002 			goto out;
1003 
1004 		if (flags & KEXEC_PRESERVE_CONTEXT)
1005 			image->preserve_context = 1;
1006 		result = machine_kexec_prepare(image);
1007 		if (result)
1008 			goto out;
1009 
1010 		for (i = 0; i < nr_segments; i++) {
1011 			result = kimage_load_segment(image, &image->segment[i]);
1012 			if (result)
1013 				goto out;
1014 		}
1015 		kimage_terminate(image);
1016 		if (flags & KEXEC_ON_CRASH)
1017 			crash_unmap_reserved_pages();
1018 	}
1019 	/* Install the new kernel and uninstall the old */
1020 	image = xchg(dest_image, image);
1021 
1022 out:
1023 	mutex_unlock(&kexec_mutex);
1024 	kimage_free(image);
1025 
1026 	return result;
1027 }
1028 
1029 /*
1030  * Add and remove page tables for crashkernel memory
1031  *
1032  * Provide an empty default implementation here -- architecture
1033  * code may override this
1034  */
1035 void __weak crash_map_reserved_pages(void)
1036 {}
1037 
1038 void __weak crash_unmap_reserved_pages(void)
1039 {}
1040 
1041 #ifdef CONFIG_COMPAT
1042 COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
1043 		       compat_ulong_t, nr_segments,
1044 		       struct compat_kexec_segment __user *, segments,
1045 		       compat_ulong_t, flags)
1046 {
1047 	struct compat_kexec_segment in;
1048 	struct kexec_segment out, __user *ksegments;
1049 	unsigned long i, result;
1050 
1051 	/* Don't allow clients that don't understand the native
1052 	 * architecture to do anything.
1053 	 */
1054 	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1055 		return -EINVAL;
1056 
1057 	if (nr_segments > KEXEC_SEGMENT_MAX)
1058 		return -EINVAL;
1059 
1060 	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1061 	for (i = 0; i < nr_segments; i++) {
1062 		result = copy_from_user(&in, &segments[i], sizeof(in));
1063 		if (result)
1064 			return -EFAULT;
1065 
1066 		out.buf   = compat_ptr(in.buf);
1067 		out.bufsz = in.bufsz;
1068 		out.mem   = in.mem;
1069 		out.memsz = in.memsz;
1070 
1071 		result = copy_to_user(&ksegments[i], &out, sizeof(out));
1072 		if (result)
1073 			return -EFAULT;
1074 	}
1075 
1076 	return sys_kexec_load(entry, nr_segments, ksegments, flags);
1077 }
1078 #endif
1079 
1080 void crash_kexec(struct pt_regs *regs)
1081 {
1082 	/* Take the kexec_mutex here to prevent sys_kexec_load
1083 	 * running on one cpu from replacing the crash kernel
1084 	 * we are using after a panic on a different cpu.
1085 	 *
1086 	 * If the crash kernel was not located in a fixed area
1087 	 * of memory the xchg(&kexec_crash_image) would be
1088 	 * sufficient.  But since I reuse the memory...
1089 	 */
1090 	if (mutex_trylock(&kexec_mutex)) {
1091 		if (kexec_crash_image) {
1092 			struct pt_regs fixed_regs;
1093 
1094 			crash_setup_regs(&fixed_regs, regs);
1095 			crash_save_vmcoreinfo();
1096 			machine_crash_shutdown(&fixed_regs);
1097 			machine_kexec(kexec_crash_image);
1098 		}
1099 		mutex_unlock(&kexec_mutex);
1100 	}
1101 }
1102 
1103 size_t crash_get_memory_size(void)
1104 {
1105 	size_t size = 0;
1106 	mutex_lock(&kexec_mutex);
1107 	if (crashk_res.end != crashk_res.start)
1108 		size = resource_size(&crashk_res);
1109 	mutex_unlock(&kexec_mutex);
1110 	return size;
1111 }
1112 
1113 void __weak crash_free_reserved_phys_range(unsigned long begin,
1114 					   unsigned long end)
1115 {
1116 	unsigned long addr;
1117 
1118 	for (addr = begin; addr < end; addr += PAGE_SIZE)
1119 		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
1120 }
1121 
1122 int crash_shrink_memory(unsigned long new_size)
1123 {
1124 	int ret = 0;
1125 	unsigned long start, end;
1126 	unsigned long old_size;
1127 	struct resource *ram_res;
1128 
1129 	mutex_lock(&kexec_mutex);
1130 
1131 	if (kexec_crash_image) {
1132 		ret = -ENOENT;
1133 		goto unlock;
1134 	}
1135 	start = crashk_res.start;
1136 	end = crashk_res.end;
1137 	old_size = (end == 0) ? 0 : end - start + 1;
1138 	if (new_size >= old_size) {
1139 		ret = (new_size == old_size) ? 0 : -EINVAL;
1140 		goto unlock;
1141 	}
1142 
1143 	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1144 	if (!ram_res) {
1145 		ret = -ENOMEM;
1146 		goto unlock;
1147 	}
1148 
1149 	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1150 	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1151 
1152 	crash_map_reserved_pages();
1153 	crash_free_reserved_phys_range(end, crashk_res.end);
1154 
1155 	if ((start == end) && (crashk_res.parent != NULL))
1156 		release_resource(&crashk_res);
1157 
1158 	ram_res->start = end;
1159 	ram_res->end = crashk_res.end;
1160 	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1161 	ram_res->name = "System RAM";
1162 
1163 	crashk_res.end = end - 1;
1164 
1165 	insert_resource(&iomem_resource, ram_res);
1166 	crash_unmap_reserved_pages();
1167 
1168 unlock:
1169 	mutex_unlock(&kexec_mutex);
1170 	return ret;
1171 }
1172 
1173 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1174 			    size_t data_len)
1175 {
1176 	struct elf_note note;
1177 
1178 	note.n_namesz = strlen(name) + 1;
1179 	note.n_descsz = data_len;
1180 	note.n_type   = type;
1181 	memcpy(buf, &note, sizeof(note));
1182 	buf += (sizeof(note) + 3)/4;
1183 	memcpy(buf, name, note.n_namesz);
1184 	buf += (note.n_namesz + 3)/4;
1185 	memcpy(buf, data, note.n_descsz);
1186 	buf += (note.n_descsz + 3)/4;
1187 
1188 	return buf;
1189 }
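/*
 * Worked example of the layout this emits for name = "CORE" (the usual
 * KEXEC_CORE_NOTE_NAME): an elf_note header with n_namesz = 5 and
 * n_descsz = data_len, then "CORE\0" with the write pointer advanced to
 * the next 4-byte boundary, then the data, again rounded up to 4 bytes.
 * This is the standard ELF note format that /proc/vmcore readers parse.
 */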
1190 
1191 static void final_note(u32 *buf)
1192 {
1193 	struct elf_note note;
1194 
1195 	note.n_namesz = 0;
1196 	note.n_descsz = 0;
1197 	note.n_type   = 0;
1198 	memcpy(buf, &note, sizeof(note));
1199 }
1200 
1201 void crash_save_cpu(struct pt_regs *regs, int cpu)
1202 {
1203 	struct elf_prstatus prstatus;
1204 	u32 *buf;
1205 
1206 	if ((cpu < 0) || (cpu >= nr_cpu_ids))
1207 		return;
1208 
1209 	/* Using ELF notes here is opportunistic.
1210 	 * I need a well defined structure format
1211 	 * for the data I pass, and I need tags
1212 	 * on the data to indicate what information I have
1213 	 * squirrelled away.  ELF notes happen to provide
1214 	 * all of that, so there is no need to invent something new.
1215 	 */
1216 	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1217 	if (!buf)
1218 		return;
1219 	memset(&prstatus, 0, sizeof(prstatus));
1220 	prstatus.pr_pid = current->pid;
1221 	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1222 	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1223 			      &prstatus, sizeof(prstatus));
1224 	final_note(buf);
1225 }
1226 
1227 static int __init crash_notes_memory_init(void)
1228 {
1229 	/* Allocate memory for saving cpu registers. */
1230 	crash_notes = alloc_percpu(note_buf_t);
1231 	if (!crash_notes) {
1232 		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
1233 		return -ENOMEM;
1234 	}
1235 	return 0;
1236 }
1237 subsys_initcall(crash_notes_memory_init);
1238 
1239 
1240 /*
1241  * Parsing the "crashkernel" command line.
1242  *
1243  * This code is intended to be called from architecture-specific code.
1244  */
1245 
1246 
1247 /*
1248  * This function parses command lines in the format
1249  *
1250  *   crashkernel=ramsize-range:size[,...][@offset]
1251  *
1252  * The function returns 0 on success and -EINVAL on failure.
1253  */
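/*
 * For example:
 *
 *	crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when system RAM falls within [512M, 2G) and 128M when it
 * is 2G or more; an open-ended range such as "2G-" leaves end at
 * ULLONG_MAX, so any larger system_ram matches it.
 */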
1254 static int __init parse_crashkernel_mem(char *cmdline,
1255 					unsigned long long system_ram,
1256 					unsigned long long *crash_size,
1257 					unsigned long long *crash_base)
1258 {
1259 	char *cur = cmdline, *tmp;
1260 
1261 	/* for each entry of the comma-separated list */
1262 	do {
1263 		unsigned long long start, end = ULLONG_MAX, size;
1264 
1265 		/* get the start of the range */
1266 		start = memparse(cur, &tmp);
1267 		if (cur == tmp) {
1268 			pr_warn("crashkernel: Memory value expected\n");
1269 			return -EINVAL;
1270 		}
1271 		cur = tmp;
1272 		if (*cur != '-') {
1273 			pr_warn("crashkernel: '-' expected\n");
1274 			return -EINVAL;
1275 		}
1276 		cur++;
1277 
1278 		/* if no ':' is here, then we read the end */
1279 		if (*cur != ':') {
1280 			end = memparse(cur, &tmp);
1281 			if (cur == tmp) {
1282 				pr_warn("crashkernel: Memory value expected\n");
1283 				return -EINVAL;
1284 			}
1285 			cur = tmp;
1286 			if (end <= start) {
1287 				pr_warn("crashkernel: end <= start\n");
1288 				return -EINVAL;
1289 			}
1290 		}
1291 
1292 		if (*cur != ':') {
1293 			pr_warn("crashkernel: ':' expected\n");
1294 			return -EINVAL;
1295 		}
1296 		cur++;
1297 
1298 		size = memparse(cur, &tmp);
1299 		if (cur == tmp) {
1300 			pr_warn("Memory value expected\n");
1301 			return -EINVAL;
1302 		}
1303 		cur = tmp;
1304 		if (size >= system_ram) {
1305 			pr_warn("crashkernel: invalid size\n");
1306 			return -EINVAL;
1307 		}
1308 
1309 		/* match ? */
1310 		if (system_ram >= start && system_ram < end) {
1311 			*crash_size = size;
1312 			break;
1313 		}
1314 	} while (*cur++ == ',');
1315 
1316 	if (*crash_size > 0) {
1317 		while (*cur && *cur != ' ' && *cur != '@')
1318 			cur++;
1319 		if (*cur == '@') {
1320 			cur++;
1321 			*crash_base = memparse(cur, &tmp);
1322 			if (cur == tmp) {
1323 				pr_warn("Memory value expected after '@'\n");
1324 				return -EINVAL;
1325 			}
1326 		}
1327 	}
1328 
1329 	return 0;
1330 }
1331 
1332 /*
1333  * This function parses "simple" (old) crashkernel command lines like
1334  *
1335  *	crashkernel=size[@offset]
1336  *
1337  * It returns 0 on success and -EINVAL on failure.
1338  */
1339 static int __init parse_crashkernel_simple(char *cmdline,
1340 					   unsigned long long *crash_size,
1341 					   unsigned long long *crash_base)
1342 {
1343 	char *cur = cmdline;
1344 
1345 	*crash_size = memparse(cmdline, &cur);
1346 	if (cmdline == cur) {
1347 		pr_warn("crashkernel: memory value expected\n");
1348 		return -EINVAL;
1349 	}
1350 
1351 	if (*cur == '@')
1352 		*crash_base = memparse(cur+1, &cur);
1353 	else if (*cur != ' ' && *cur != '\0') {
1354 		pr_warn("crashkernel: unrecognized char\n");
1355 		return -EINVAL;
1356 	}
1357 
1358 	return 0;
1359 }
1360 
1361 #define SUFFIX_HIGH 0
1362 #define SUFFIX_LOW  1
1363 #define SUFFIX_NULL 2
1364 static __initdata char *suffix_tbl[] = {
1365 	[SUFFIX_HIGH] = ",high",
1366 	[SUFFIX_LOW]  = ",low",
1367 	[SUFFIX_NULL] = NULL,
1368 };
1369 
1370 /*
1371  * This function parses "suffix" crashkernel command lines like
1372  *
1373  *	crashkernel=size,[high|low]
1374  *
1375  * It returns 0 on success and -EINVAL on failure.
1376  */
1377 static int __init parse_crashkernel_suffix(char *cmdline,
1378 					   unsigned long long	*crash_size,
1379 					   unsigned long long	*crash_base,
1380 					   const char *suffix)
1381 {
1382 	char *cur = cmdline;
1383 
1384 	*crash_size = memparse(cmdline, &cur);
1385 	if (cmdline == cur) {
1386 		pr_warn("crashkernel: memory value expected\n");
1387 		return -EINVAL;
1388 	}
1389 
1390 	/* check with suffix */
1391 	if (strncmp(cur, suffix, strlen(suffix))) {
1392 		pr_warn("crashkernel: unrecognized char\n");
1393 		return -EINVAL;
1394 	}
1395 	cur += strlen(suffix);
1396 	if (*cur != ' ' && *cur != '\0') {
1397 		pr_warn("crashkernel: unrecognized char\n");
1398 		return -EINVAL;
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 static __init char *get_last_crashkernel(char *cmdline,
1405 			     const char *name,
1406 			     const char *suffix)
1407 {
1408 	char *p = cmdline, *ck_cmdline = NULL;
1409 
1410 	/* find crashkernel and use the last one if there are more */
1411 	p = strstr(p, name);
1412 	while (p) {
1413 		char *end_p = strchr(p, ' ');
1414 		char *q;
1415 
1416 		if (!end_p)
1417 			end_p = p + strlen(p);
1418 
1419 		if (!suffix) {
1420 			int i;
1421 
1422 			/* skip the one with any known suffix */
1423 			for (i = 0; suffix_tbl[i]; i++) {
1424 				q = end_p - strlen(suffix_tbl[i]);
1425 				if (!strncmp(q, suffix_tbl[i],
1426 					     strlen(suffix_tbl[i])))
1427 					goto next;
1428 			}
1429 			ck_cmdline = p;
1430 		} else {
1431 			q = end_p - strlen(suffix);
1432 			if (!strncmp(q, suffix, strlen(suffix)))
1433 				ck_cmdline = p;
1434 		}
1435 next:
1436 		p = strstr(p+1, name);
1437 	}
1438 
1439 	if (!ck_cmdline)
1440 		return NULL;
1441 
1442 	return ck_cmdline;
1443 }
1444 
1445 static int __init __parse_crashkernel(char *cmdline,
1446 			     unsigned long long system_ram,
1447 			     unsigned long long *crash_size,
1448 			     unsigned long long *crash_base,
1449 			     const char *name,
1450 			     const char *suffix)
1451 {
1452 	char	*first_colon, *first_space;
1453 	char	*ck_cmdline;
1454 
1455 	BUG_ON(!crash_size || !crash_base);
1456 	*crash_size = 0;
1457 	*crash_base = 0;
1458 
1459 	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1460 
1461 	if (!ck_cmdline)
1462 		return -EINVAL;
1463 
1464 	ck_cmdline += strlen(name);
1465 
1466 	if (suffix)
1467 		return parse_crashkernel_suffix(ck_cmdline, crash_size,
1468 				crash_base, suffix);
1469 	/*
1470 	 * if the commandline contains a ':', then that's the extended
1471 	 * syntax -- if not, it must be the classic syntax
1472 	 */
1473 	first_colon = strchr(ck_cmdline, ':');
1474 	first_space = strchr(ck_cmdline, ' ');
1475 	if (first_colon && (!first_space || first_colon < first_space))
1476 		return parse_crashkernel_mem(ck_cmdline, system_ram,
1477 				crash_size, crash_base);
1478 
1479 	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
1480 }
1481 
1482 /*
1483  * This function is the entry point for command line parsing and should be
1484  * called from the arch-specific code.
1485  */
1486 int __init parse_crashkernel(char *cmdline,
1487 			     unsigned long long system_ram,
1488 			     unsigned long long *crash_size,
1489 			     unsigned long long *crash_base)
1490 {
1491 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1492 					"crashkernel=", NULL);
1493 }
1494 
1495 int __init parse_crashkernel_high(char *cmdline,
1496 			     unsigned long long system_ram,
1497 			     unsigned long long *crash_size,
1498 			     unsigned long long *crash_base)
1499 {
1500 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1501 				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1502 }
1503 
1504 int __init parse_crashkernel_low(char *cmdline,
1505 			     unsigned long long system_ram,
1506 			     unsigned long long *crash_size,
1507 			     unsigned long long *crash_base)
1508 {
1509 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1510 				"crashkernel=", suffix_tbl[SUFFIX_LOW]);
1511 }
1512 
1513 static void update_vmcoreinfo_note(void)
1514 {
1515 	u32 *buf = vmcoreinfo_note;
1516 
1517 	if (!vmcoreinfo_size)
1518 		return;
1519 	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1520 			      vmcoreinfo_size);
1521 	final_note(buf);
1522 }
1523 
1524 void crash_save_vmcoreinfo(void)
1525 {
1526 	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1527 	update_vmcoreinfo_note();
1528 }
1529 
1530 void vmcoreinfo_append_str(const char *fmt, ...)
1531 {
1532 	va_list args;
1533 	char buf[0x50];
1534 	size_t r;
1535 
1536 	va_start(args, fmt);
1537 	r = vscnprintf(buf, sizeof(buf), fmt, args);
1538 	va_end(args);
1539 
1540 	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1541 
1542 	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1543 
1544 	vmcoreinfo_size += r;
1545 }
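/*
 * The accumulated vmcoreinfo_data is a flat sequence of KEY=value
 * lines, roughly (values here are illustrative):
 *
 *	OSRELEASE=3.16.0
 *	PAGESIZE=4096
 *	SYMBOL(init_uts_ns)=ffffffff81a0a420
 *	OFFSET(page.flags)=0
 *	CRASHTIME=1400000000
 *
 * Dump tools such as makedumpfile read these from the vmcoreinfo note
 * to interpret the crashed kernel's memory without debug information.
 */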
1546 
1547 /*
1548  * provide an empty default implementation here -- architecture
1549  * code may override this
1550  */
1551 void __weak arch_crash_save_vmcoreinfo(void)
1552 {}
1553 
1554 unsigned long __weak paddr_vmcoreinfo_note(void)
1555 {
1556 	return __pa((unsigned long)(char *)&vmcoreinfo_note);
1557 }
1558 
1559 static int __init crash_save_vmcoreinfo_init(void)
1560 {
1561 	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1562 	VMCOREINFO_PAGESIZE(PAGE_SIZE);
1563 
1564 	VMCOREINFO_SYMBOL(init_uts_ns);
1565 	VMCOREINFO_SYMBOL(node_online_map);
1566 #ifdef CONFIG_MMU
1567 	VMCOREINFO_SYMBOL(swapper_pg_dir);
1568 #endif
1569 	VMCOREINFO_SYMBOL(_stext);
1570 	VMCOREINFO_SYMBOL(vmap_area_list);
1571 
1572 #ifndef CONFIG_NEED_MULTIPLE_NODES
1573 	VMCOREINFO_SYMBOL(mem_map);
1574 	VMCOREINFO_SYMBOL(contig_page_data);
1575 #endif
1576 #ifdef CONFIG_SPARSEMEM
1577 	VMCOREINFO_SYMBOL(mem_section);
1578 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1579 	VMCOREINFO_STRUCT_SIZE(mem_section);
1580 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
1581 #endif
1582 	VMCOREINFO_STRUCT_SIZE(page);
1583 	VMCOREINFO_STRUCT_SIZE(pglist_data);
1584 	VMCOREINFO_STRUCT_SIZE(zone);
1585 	VMCOREINFO_STRUCT_SIZE(free_area);
1586 	VMCOREINFO_STRUCT_SIZE(list_head);
1587 	VMCOREINFO_SIZE(nodemask_t);
1588 	VMCOREINFO_OFFSET(page, flags);
1589 	VMCOREINFO_OFFSET(page, _count);
1590 	VMCOREINFO_OFFSET(page, mapping);
1591 	VMCOREINFO_OFFSET(page, lru);
1592 	VMCOREINFO_OFFSET(page, _mapcount);
1593 	VMCOREINFO_OFFSET(page, private);
1594 	VMCOREINFO_OFFSET(pglist_data, node_zones);
1595 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
1596 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1597 	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1598 #endif
1599 	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1600 	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1601 	VMCOREINFO_OFFSET(pglist_data, node_id);
1602 	VMCOREINFO_OFFSET(zone, free_area);
1603 	VMCOREINFO_OFFSET(zone, vm_stat);
1604 	VMCOREINFO_OFFSET(zone, spanned_pages);
1605 	VMCOREINFO_OFFSET(free_area, free_list);
1606 	VMCOREINFO_OFFSET(list_head, next);
1607 	VMCOREINFO_OFFSET(list_head, prev);
1608 	VMCOREINFO_OFFSET(vmap_area, va_start);
1609 	VMCOREINFO_OFFSET(vmap_area, list);
1610 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1611 	log_buf_kexec_setup();
1612 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1613 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
1614 	VMCOREINFO_NUMBER(PG_lru);
1615 	VMCOREINFO_NUMBER(PG_private);
1616 	VMCOREINFO_NUMBER(PG_swapcache);
1617 	VMCOREINFO_NUMBER(PG_slab);
1618 #ifdef CONFIG_MEMORY_FAILURE
1619 	VMCOREINFO_NUMBER(PG_hwpoison);
1620 #endif
1621 	VMCOREINFO_NUMBER(PG_head_mask);
1622 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1623 #ifdef CONFIG_HUGETLBFS
1624 	VMCOREINFO_SYMBOL(free_huge_page);
1625 #endif
1626 
1627 	arch_crash_save_vmcoreinfo();
1628 	update_vmcoreinfo_note();
1629 
1630 	return 0;
1631 }
1632 
1633 subsys_initcall(crash_save_vmcoreinfo_init);
1634 
1635 /*
1636  * Move into place and start executing a preloaded standalone
1637  * executable.  If nothing was preloaded, return an error.
1638  */
1639 int kernel_kexec(void)
1640 {
1641 	int error = 0;
1642 
1643 	if (!mutex_trylock(&kexec_mutex))
1644 		return -EBUSY;
1645 	if (!kexec_image) {
1646 		error = -EINVAL;
1647 		goto Unlock;
1648 	}
1649 
1650 #ifdef CONFIG_KEXEC_JUMP
1651 	if (kexec_image->preserve_context) {
1652 		lock_system_sleep();
1653 		pm_prepare_console();
1654 		error = freeze_processes();
1655 		if (error) {
1656 			error = -EBUSY;
1657 			goto Restore_console;
1658 		}
1659 		suspend_console();
1660 		error = dpm_suspend_start(PMSG_FREEZE);
1661 		if (error)
1662 			goto Resume_console;
1663 		/* At this point, dpm_suspend_start() has been called,
1664 		 * but *not* dpm_suspend_end(). We *must* call
1665 		 * dpm_suspend_end() now.  Otherwise, drivers for
1666 		 * some devices (e.g. interrupt controllers) become
1667 		 * desynchronized with the actual state of the
1668 		 * hardware at resume time, and evil weirdness ensues.
1669 		 */
1670 		error = dpm_suspend_end(PMSG_FREEZE);
1671 		if (error)
1672 			goto Resume_devices;
1673 		error = disable_nonboot_cpus();
1674 		if (error)
1675 			goto Enable_cpus;
1676 		local_irq_disable();
1677 		error = syscore_suspend();
1678 		if (error)
1679 			goto Enable_irqs;
1680 	} else
1681 #endif
1682 	{
1683 		kexec_in_progress = true;
1684 		kernel_restart_prepare(NULL);
1685 		migrate_to_reboot_cpu();
1686 
1687 		/*
1688 		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1689 		 * no further code needs to use CPU hotplug (which is true in
1690 		 * the reboot case). However, the kexec path depends on using
1691 		 * CPU hotplug again; so re-enable it here.
1692 		 */
1693 		cpu_hotplug_enable();
1694 		pr_emerg("Starting new kernel\n");
1695 		machine_shutdown();
1696 	}
1697 
1698 	machine_kexec(kexec_image);
1699 
1700 #ifdef CONFIG_KEXEC_JUMP
1701 	if (kexec_image->preserve_context) {
1702 		syscore_resume();
1703  Enable_irqs:
1704 		local_irq_enable();
1705  Enable_cpus:
1706 		enable_nonboot_cpus();
1707 		dpm_resume_start(PMSG_RESTORE);
1708  Resume_devices:
1709 		dpm_resume_end(PMSG_RESTORE);
1710  Resume_console:
1711 		resume_console();
1712 		thaw_processes();
1713  Restore_console:
1714 		pm_restore_console();
1715 		unlock_system_sleep();
1716 	}
1717 #endif
1718 
1719  Unlock:
1720 	mutex_unlock(&kexec_mutex);
1721 	return error;
1722 }
1723