/*
 *	fs/proc/vmcore.c - Interface for accessing the crash
 *	dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/*
 * List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
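
/*
 * Illustrative sketch (not part of this file): a paravirtualized backend,
 * e.g. a balloon driver, would typically hook in from its own init code.
 * The names balloon_pfn_is_ram() and balloon_pfn_bitmap are made up for
 * this example; only register_oldmem_pfn_is_ram() itself is real.
 *
 *	static int balloon_pfn_is_ram(unsigned long pfn)
 *	{
 *		return !test_bit(pfn, balloon_pfn_bitmap);
 *	}
 *
 *	static int __init balloon_crash_init(void)
 *	{
 *		return register_oldmem_pfn_is_ram(&balloon_pfn_is_ram);
 *	}
 */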

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads a page from the oldmem device at the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0) {
			memset(buf, 0, nr_bytes);
		} else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the ELF header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from ELF note segments.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}
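
/*
 * Sketch of an architecture override (illustrative only): an architecture
 * whose old memory is not directly accessible via copy_oldmem_page() could
 * provide, e.g. in arch/<arch>/kernel/crash_dump.c:
 *
 *	ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 *	{
 *		return arch_copy_from_oldmem(buf, count, ppos);
 *	}
 *
 * arch_copy_from_oldmem() is a placeholder for whatever access method the
 * architecture uses; s390 overrides these hooks in a comparable way.
 */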

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

/*
 * Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is already filled */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is already filled */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if the buffer is already filled */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_CACHE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			page_cache_release(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all
 * pages reported as not being RAM with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not RAM. Remap the contiguous
			 * region between pos_start and pos - 1 and replace
			 * the non-RAM page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap the contiguous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
			    unsigned long from, unsigned long pfn,
			    unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered, to avoid
	 * needlessly looping over all pages.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = min_t(size_t, m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};
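
/*
 * Minimal user-space sketch (assuming a kdump kernel where /proc/vmcore
 * exists) of the two access paths these file operations provide:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	Elf64_Ehdr ehdr;
 *	ssize_t n = read(fd, &ehdr, sizeof(ehdr));
 *	void *p = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * where length is at most the file size. Dump tools such as makedumpfile
 * consume the dump through this same interface.
 */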

static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
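			/*
			 * Each note is an Elf64_Nhdr followed by the name and
			 * the descriptor, both padded to 4-byte boundaries.
			 * E.g. the "CORE" prstatus note has n_namesz == 5
			 * (including the trailing NUL), so its name occupies
			 * 8 bytes here.
			 */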
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz member.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i,
		((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
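
/*
 * The merge (shared in spirit with the ELF32 variant below) transforms the
 * header buffer as follows, relying on kexec placing all PT_NOTE entries
 * ahead of the PT_LOAD entries:
 *
 *	before:	Ehdr | PT_NOTE | ... | PT_NOTE | PT_LOAD | ...
 *	after:	Ehdr | merged PT_NOTE | PT_LOAD | ...
 *
 * e_phnum shrinks by nr_ptnote - 1 and the vacated tail of the buffer is
 * zeroed.
 */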

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz member.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i,
		((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/*
 * Add memory chunks represented by program headers to the vmcore list. Also
 * update the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;
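
		/*
		 * E.g. with 4K pages, p_offset == 0x100200 and
		 * p_memsz == 0x300 give start == 0x100000,
		 * end == 0x101000 and hence size == 0x1000.
		 */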

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
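
/*
 * Resulting /proc/vmcore layout, with each region starting page aligned:
 *
 *	[ ELF header + program headers ]	elfcorebuf, elfcorebuf_sz
 *	[ merged ELF note segment      ]	elfnotes_buf, elfnotes_sz
 *	[ 1st memory chunk             ]	first vmcore_list entry
 *	[ 2nd memory chunk             ]	...
 */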

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate the ELF header in the 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed on the command line or created
	 * in the 2nd kernel, then capture the dump.
	 */
	if (!is_vmcore_usable())
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* Clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}