xref: /openbmc/linux/kernel/kexec.c (revision d2168146)
/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the new
 * kernel is placed in the control_code_buffer, whose size is given
 * by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single page
 * of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
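
/*
 * A minimal sketch (not part of the original file) of how the
 * descriptor list handed to the assembly stub is walked.  It mirrors
 * the for_each_kimage_entry() macro defined later in this file: each
 * kimage_entry_t is a physical address with flag bits in the low
 * bits; IND_DESTINATION sets the copy cursor, IND_SOURCE names a page
 * the stub copies to the cursor, IND_INDIRECTION chains to the next
 * page of entries, and IND_DONE terminates the list.
 */
#if 0
static void example_walk_descriptor_list(kimage_entry_t *head)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for (ptr = head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
			phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (entry & IND_DESTINATION)
			/* set the copy cursor */
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE)
			/* the stub copies this page to the cursor,
			 * then advances the cursor one page
			 */
			destination += PAGE_SIZE;
	}
}
#endif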

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result) {
		result = -EFAULT;
		goto out;
	}

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stomps on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes are no larger than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}
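
/*
 * A hedged illustrative note (not in the original file): the overlap
 * test above treats each segment as the half-open interval
 * [mem, mem + memsz).  Two intervals overlap exactly when each one
 * starts before the other ends.
 */
#if 0
static int example_segments_overlap(unsigned long mstart, unsigned long mend,
				    unsigned long pstart, unsigned long pend)
{
	/* e.g. [0x1000,0x3000) and [0x2000,0x4000) overlap;
	 * [0x1000,0x2000) and [0x2000,0x3000) merely touch and do not.
	 */
	return (mend > pstart) && (mstart < pend);
}
#endif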

static void kimage_free_page_list(struct list_head *list);

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		pr_err("Could not allocate swap buffer\n");
		goto out_free;
	}

	*rimage = image;
	return 0;

out_free:
	kimage_free_page_list(&image->control_pages);
	kfree(image);
out:
	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out_free;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free;
	}

	*rimage = image;
	return 0;

out_free:
	kfree(image);
out:
	return result;
}

static int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
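
/*
 * A hedged worked example (not in the original file) of the alignment
 * arithmetic above: (x + (size - 1)) & ~(size - 1) rounds x up to the
 * next multiple of size.  With order = 1 on 4 KiB pages, size is
 * 0x2000; a control_page of 0x10000800 yields hole_start = 0x10002000
 * and hole_end = 0x10003fff, the next naturally aligned 8 KiB window
 * to test against the segments.
 */
#if 0
/* Round-up helper equivalent to the open-coded arithmetic above */
static unsigned long example_round_up(unsigned long x, unsigned long size)
{
	return (x + (size - 1)) & ~(size - 1);	/* size: a power of two */
}
#endif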

struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page; if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used, file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
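
/*
 * A hedged worked example (not in the original file): with 4 KiB
 * pages, a page-aligned segment of memsz = 3 * PAGE_SIZE and
 * bufsz = 5000 is loaded as uchunk = 4096, then 904, then 0; every
 * page starts out cleared, so the tail of memsz beyond bufsz reads
 * back as zeros (bss-style).
 */
#if 0
/* Chunking for one loop iteration, mirroring the code above */
static size_t example_chunk(unsigned long maddr, size_t mbytes, size_t ubytes)
{
	/* Bytes of this page to fill, then bytes actually copied */
	size_t mchunk = min_t(size_t, mbytes,
			      PAGE_SIZE - (maddr & ~PAGE_MASK));
	return min(ubytes, mchunk);	/* 0 once the buffer runs out */
}
#endif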

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination and
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do it yourself.
 */
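
/*
 * A hedged userspace sketch (not part of this file) of driving this
 * system call.  It assumes the <linux/kexec.h> UAPI definitions and a
 * raw syscall(2) invocation, as there is typically no libc wrapper;
 * the buffer contents and entry point are placeholders, and a real
 * loader such as kexec-tools does considerably more work.
 */
#if 0
#include <linux/kexec.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_load(void *buf, size_t bufsz,
			unsigned long dest, size_t memsz,
			unsigned long entry)
{
	struct kexec_segment seg = {
		.buf   = buf,		/* source in this process */
		.bufsz = bufsz,
		.mem   = (void *)dest,	/* page aligned destination */
		.memsz = memsz,		/* page aligned, >= bufsz */
	};

	/* Requires CAP_SYS_BOOT; see the capable() check below. */
	return syscall(SYS_kexec_load, entry, 1UL, &seg,
		       KEXEC_ARCH_DEFAULT);
}
#endif
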
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags.
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;
	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
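
/*
 * A hedged layout sketch (not in the original file): an ELF note is
 * the 12-byte header followed by the name and then the descriptor,
 * each padded out to a 4-byte boundary; (x + 3)/4 is the number of
 * u32 words needed to hold x bytes.  For a name of "CORE" (5 bytes
 * with the terminating NUL) the layout in the u32 buffer is:
 *
 *	buf[0..2]	n_namesz = 5, n_descsz, n_type
 *	buf[3..4]	"CORE\0" plus 3 bytes of padding
 *	buf[5..]	descriptor data, padded to a word boundary
 */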

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);

/*
 * Parsing the "crashkernel" command line
 *
 * This code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *   crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
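
/*
 * A hedged worked example (not in the original file): on a machine
 * with 4 GiB of RAM, "crashkernel=512M-2G:64M,2G-:128M@16M" skips the
 * first entry (4 GiB is not in [512M, 2G)), matches the open-ended
 * "2G-" range (end stays ULLONG_MAX), so *crash_size becomes 128M,
 * and the trailing "@16M" sets *crash_base to 16M.
 */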

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}

#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};

/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long	*crash_size,
					   unsigned long long	*crash_base,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}

static __init char *get_last_crashkernel(char *cmdline,
			     const char *name,
			     const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}

static int __init __parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base,
			     const char *name,
			     const char *suffix)
{
	char	*first_colon, *first_space;
	char	*ck_cmdline;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
				crash_base, suffix);
	/*
	 * if the command line contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}

/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
					"crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				"crashkernel=", suffix_tbl[SUFFIX_LOW]);
}

static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}

/*
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}
1719