// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c - Interface for accessing the crash
 *			   dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list and mutex to synchronize access to the list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device dump size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

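/*
 * Readers walk vmcore_cb_list under SRCU (rather than plain RCU) because
 * pfn_is_ram() is called while copying to user space, which may fault and
 * sleep. A pfn is treated as RAM only if no registered callback reports it
 * as not being RAM. A minimal registration sketch (the "my_driver" names
 * are hypothetical; virtio_mem is a real in-tree user of this interface):
 *
 *	static bool my_driver_pfn_is_ram(struct vmcore_cb *cb,
 *					 unsigned long pfn)
 *	{
 *		return !my_driver_pfn_is_unplugged(pfn);
 *	}
 *
 *	static struct vmcore_cb my_driver_vmcore_cb = {
 *		.pfn_is_ram = my_driver_pfn_is_ram,
 *	};
 *
 *	register_vmcore_cb(&my_driver_vmcore_cb);
 */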
static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}

/* Reads up to @count bytes from the oldmem device, page by page, starting
 * at *@ppos.
 */
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = 0;
			if (!userbuf)
				memset(buf, 0, nr_bytes);
			else if (clear_user(buf, nr_bytes))
				tmp = -EFAULT;
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);
		}
		if (tmp < 0) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return tmp;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}


/*
 * Architectures may override this function to allocate the ELF header in the
 * 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from note sections.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space.
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if the buffer is filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if the buffer is filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

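/*
 * Resulting /proc/vmcore layout, as assembled by the read and mmap paths
 * below (a sketch derived from the offset arithmetic in this file):
 *
 *	+----------------------------+ 0
 *	| ELF header + phdrs         |  elfcorebuf     (elfcorebuf_sz)
 *	+----------------------------+ elfcorebuf_sz
 *	| device dump notes (if any) |  vmcoredd_list  (vmcoredd_orig_sz)
 *	| merged ELF notes           |  elfnotes_buf
 *	+----------------------------+ elfcorebuf_sz + elfnotes_sz
 *	| old-kernel memory chunks   |  vmcore_list, via read_from_oldmem()
 *	+----------------------------+ vmcore_size
 */
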
/* Read from the ELF header and then the crash dump. Returns a negative value
 * on error, otherwise the number of bytes read.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is filled already */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before the other ELF notes because the
		 * other notes may not fill the note buffer completely, which
		 * would leave zero-filled data between the notes and the
		 * device dumps. Tools would then try to decode this
		 * zero-filled data as valid notes, which we don't want.
		 * Adding the device dumps first avoids that.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if the buffer is filled already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining ELF notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is filled already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if the buffer is filled already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the contiguous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap contiguous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}
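
/*
 * Mapping sketch: for pfns [a..b] with a single hole at h
 * (pfn_is_ram(h) == false), the VMA receives [a..h-1] from oldmem, one
 * shared zero page at h, and then [h+1..b], so userspace sees a fully
 * populated, read-only view in which holes read as zeroes.
 */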

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before the other ELF notes because the
		 * other notes may not fill the note buffer completely, which
		 * would leave zero-filled data between the notes and the
		 * device dumps. Tools would then try to decode this
		 * zero-filled data as valid notes, which we don't want.
		 * Adding the device dumps first avoids that and also ensures
		 * that the device dumps and the other notes can be mmapped
		 * at page-aligned addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Map device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if the buffer is filled already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Map remaining ELF notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};
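
/*
 * Userspace consumes /proc/vmcore as an ordinary ELF core file. A minimal
 * reader sketch (illustrative only; error handling elided):
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	Elf64_Ehdr ehdr;
 *
 *	read(fd, &ehdr, sizeof(ehdr));
 *	// ...then iterate ehdr.e_phnum program headers and pread() the
 *	// PT_NOTE and PT_LOAD contents at their p_offset.
 *
 * Tools such as makedumpfile and crash read or mmap() the file this way.
 */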

static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
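			/*
			 * Each note consists of an Elf64_Nhdr followed by the
			 * name and the descriptor, each padded to a 4-byte
			 * boundary as required by the ELF specification;
			 * hence the "(x + 3) & ~3" rounding below.
			 */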
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a single, unique one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
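
/*
 * Sketch of the header-table merge performed above (assuming, e.g., one
 * PT_NOTE per CPU plus VMCOREINFO in the incoming header):
 *
 *	before: Ehdr | PT_NOTE#0 | PT_NOTE#1 | ... | PT_LOAD | ...
 *	after:  Ehdr | PT_NOTE (p_memsz = sum of real note sizes) | PT_LOAD | ...
 */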

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a single, unique one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to the vmcore list. Also
 * update the offset fields of the exported program headers accordingly.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

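		/*
		 * On entry, p_offset of each PT_LOAD holds the segment's
		 * physical start address (as prepared for the crash kernel);
		 * it is rewritten below to the segment's offset within
		 * /proc/vmcore.
		 */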
		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills the beginning of the dump's buffer with the vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to ELF header
 * @elfnotesz: Size of ELF notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to the ELF note header
 *
 * Determine the type of ELF header (Elf64 or Elf32) and update the ELF note
 * size. Also update the offsets of all program headers after the ELF note
 * header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * the ELF header
 * @dump_size: Size of the current device dump to be added to the total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing a device dump to vmcore
 * @data: dump info
 *
 * Allocate a buffer and invoke the calling driver's dump collection routine.
 * Write an ELF note at the beginning of the buffer to indicate a vmcore
 * device dump and add the dump to the global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep the size of the buffer page-aligned so that it can be mmapped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate a buffer for the driver to write its dump into */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the device dump list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
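
/*
 * Example driver usage (a minimal sketch; the "mydrv" names are
 * hypothetical):
 *
 *	static int mydrv_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// fill buf with up to data->size bytes of device state
 *		return 0;
 *	}
 *
 *	static struct vmcoredd_data data = {
 *		.dump_name = "mydrv",
 *		.size = MYDRV_DUMP_SIZE,
 *		.vmcoredd_callback = mydrv_collect,
 *	};
 *
 *	vmcore_add_device_dump(&data);
 */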
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate the ELF header in the 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed on the command line or the ELF
	 * core header has been created in the 2nd kernel, then capture
	 * the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear the vmcore device dump list */
	vmcore_free_device_dumps();
}
1623