/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
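
/*
 * Illustrative sketch only (not kernel API documentation): a hypervisor
 * backend would typically register its own check at init time, roughly:
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		// 1 if the backing page is populated RAM,
 *		// 0 if it is e.g. a ballooned-out page, < 0 on error
 *		return example_backend_page_is_populated(pfn) ? 1 : 0;
 *	}
 *	...
 *	register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *
 * Xen does this so that reading ballooned-out pages of the old kernel
 * can be avoided; the example_* names above are hypothetical.
 */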

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is RAM unless fn() checks the page type */
	int ret = 1;

	/*
	 * Ask the hypervisor if the pfn is really RAM.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads from the oldmem device, page by page, starting at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Never cross a page boundary in a single copy. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not RAM, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
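
/*
 * Worked example (assuming PAGE_SIZE == 4096): a read of count = 6000
 * bytes at *ppos = 0x1800 starts at pfn 1 with offset = 0x800. The
 * first iteration copies PAGE_SIZE - 0x800 = 2048 bytes and resets
 * offset to 0; the second copies the remaining 6000 - 2048 = 3952
 * bytes from pfn 2, and the loop exits with read = 6000.
 */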

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
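
/*
 * For reference, the file layout the branches below walk through, in
 * increasing file-offset order (all three regions are page-aligned by
 * the setup code later in this file):
 *
 *	[ ELF header + program headers ]	0 .. elfcorebuf_sz
 *	[ merged ELF note segment      ]	.. elfcorebuf_sz + elfnotes_sz
 *	[ memory chunks in vmcore_list ]	m->offset .. m->offset + m->size
 */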
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is already filled */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to_user(buffer, kaddr, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is already filled */
		if (buflen == 0)
			return acc;
	}

	/* Read memory chunks of the old kernel, in file-offset order. */
	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, 1);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if the buffer is already filled */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#if defined(CONFIG_MMU) && !defined(CONFIG_S390)
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = min_t(size_t, m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (remap_pfn_range(vma, vma->vm_start + len,
					    paddr >> PAGE_SHIFT, tsz,
					    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif
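
/*
 * Userspace sketch (illustrative, not part of this file): dump tools
 * can mmap /proc/vmcore instead of read()ing it, e.g.:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, off);
 *
 * off/len must lie within vmcore_size, and writable or executable
 * mappings are rejected with -EPERM by the check above.
 */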

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (real_sz < max_sz) {
			/* An empty name marks the end of the valid notes. */
			if (nhdr_ptr->n_namesz == 0)
				break;
			/* Name and descriptor are each padded to 4 bytes. */
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
	}

	return 0;
}
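
/*
 * Worked example for the size computation above: name and descriptor
 * are each padded to a 4-byte boundary, so a "CORE" NT_PRSTATUS note
 * with n_namesz = 5 and n_descsz = 336 (the x86_64 figure, for
 * illustration) occupies
 *
 *	sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4)
 *	  = 12 + 8 + 336 = 356 bytes.
 */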

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
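
/*
 * Before/after sketch of the rewrite above, for a dump that started out
 * with nr_ptnote PT_NOTE entries (e.g. one per crashed CPU plus
 * VMCOREINFO):
 *
 *	before: [Ehdr][PT_NOTE][PT_NOTE]...[PT_LOAD]...
 *	after:  [Ehdr][merged PT_NOTE][PT_LOAD]...[zeroed tail]
 *
 * e_phnum shrinks by nr_ptnote - 1, the vacated tail of the header
 * table is zeroed, and *elfsz is rounded up to a page boundary.
 */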

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (real_sz < max_sz) {
			/* An empty name marks the end of the valid notes. */
			if (nhdr_ptr->n_namesz == 0)
				break;
			/* Name and descriptor are each padded to 4 bytes. */
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each of them has the actual ELF note segment size in its
 * p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = read_from_oldmem(notes_buf, phdr_ptr->p_memsz, &offset, 0);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add the memory chunks represented by program headers to the vmcore list.
 * Also update the offset fields of the exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* The physical address is carried in p_offset here. */
		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
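
/*
 * Worked example (PAGE_SIZE == 4096): for a PT_LOAD entry carrying the
 * physical address 0x100f00 in p_offset with p_memsz = 0x2200,
 *
 *	start = rounddown(0x100f00, 4096) = 0x100000
 *	end   = roundup(0x100f00 + 0x2200, 4096) = 0x104000
 *	size  = 0x4000
 *
 * and the rewritten p_offset becomes vmcore_off + 0xf00, so the
 * sub-page lead-in remains addressable within the page-aligned chunk.
 */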

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* The physical address is carried in p_offset here. */
		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets the offset fields of the vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all the ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all the ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

/* Init function for the vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed on the command line, capture the dump. */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)
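
/*
 * Typical use (illustrative of common kdump tooling, not mandated by
 * this file): the second kernel boots with elfcorehdr= set by kexec,
 * and userspace then saves the dump, e.g.
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *
 * or runs makedumpfile over it to filter and compress pages.
 */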

/* Cleanup function for the vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* Clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);