xref: /openbmc/linux/fs/proc/vmcore.c (revision aad29a73199b7fbccfbabea3f1ee627ad1924f52)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2666bfddbSVivek Goyal /*
3666bfddbSVivek Goyal  *	fs/proc/vmcore.c Interface for accessing the crash
4666bfddbSVivek Goyal  * 				 dump from the system's previous life.
5666bfddbSVivek Goyal  * 	Heavily borrowed from fs/proc/kcore.c
6666bfddbSVivek Goyal  *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
7666bfddbSVivek Goyal  *	Copyright (C) IBM Corporation, 2004. All rights reserved
8666bfddbSVivek Goyal  *
9666bfddbSVivek Goyal  */
10666bfddbSVivek Goyal 
11666bfddbSVivek Goyal #include <linux/mm.h>
122f96b8c1SDavid Howells #include <linux/kcore.h>
13666bfddbSVivek Goyal #include <linux/user.h>
14666bfddbSVivek Goyal #include <linux/elf.h>
15666bfddbSVivek Goyal #include <linux/elfcore.h>
16afeacc8cSPaul Gortmaker #include <linux/export.h>
175a0e3ad6STejun Heo #include <linux/slab.h>
18666bfddbSVivek Goyal #include <linux/highmem.h>
1987ebdc00SAndrew Morton #include <linux/printk.h>
2057c8a661SMike Rapoport #include <linux/memblock.h>
21666bfddbSVivek Goyal #include <linux/init.h>
22666bfddbSVivek Goyal #include <linux/crash_dump.h>
23666bfddbSVivek Goyal #include <linux/list.h>
24c6c40533SKairui Song #include <linux/moduleparam.h>
252724273eSRahul Lakkireddy #include <linux/mutex.h>
2683086978SHATAYAMA Daisuke #include <linux/vmalloc.h>
279cb21813SMichael Holzheu #include <linux/pagemap.h>
285d8de293SMatthew Wilcox (Oracle) #include <linux/uio.h>
29e9d1d2bbSTom Lendacky #include <linux/cc_platform.h>
30666bfddbSVivek Goyal #include <asm/io.h>
312f96b8c1SDavid Howells #include "internal.h"
32666bfddbSVivek Goyal 
33666bfddbSVivek Goyal /* List representing chunks of contiguous memory areas and their offsets in
34666bfddbSVivek Goyal  * vmcore file.
35666bfddbSVivek Goyal  */
36666bfddbSVivek Goyal static LIST_HEAD(vmcore_list);
37666bfddbSVivek Goyal 
38666bfddbSVivek Goyal /* Stores the pointer to the buffer containing kernel elf core headers. */
39666bfddbSVivek Goyal static char *elfcorebuf;
40666bfddbSVivek Goyal static size_t elfcorebuf_sz;
41f2bdacddSHATAYAMA Daisuke static size_t elfcorebuf_sz_orig;
42666bfddbSVivek Goyal 
43087350c9SHATAYAMA Daisuke static char *elfnotes_buf;
44087350c9SHATAYAMA Daisuke static size_t elfnotes_sz;
457efe48dfSRahul Lakkireddy /* Size of all notes minus the device dump notes */
467efe48dfSRahul Lakkireddy static size_t elfnotes_orig_sz;
47087350c9SHATAYAMA Daisuke 
48666bfddbSVivek Goyal /* Total size of vmcore file. */
49666bfddbSVivek Goyal static u64 vmcore_size;
50666bfddbSVivek Goyal 
51a05e16adSFabian Frederick static struct proc_dir_entry *proc_vmcore;
52666bfddbSVivek Goyal 
532724273eSRahul Lakkireddy #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
542724273eSRahul Lakkireddy /* Device Dump list and mutex to synchronize access to list */
552724273eSRahul Lakkireddy static LIST_HEAD(vmcoredd_list);
562724273eSRahul Lakkireddy static DEFINE_MUTEX(vmcoredd_mutex);
57c6c40533SKairui Song 
58c6c40533SKairui Song static bool vmcoredd_disabled;
59c6c40533SKairui Song core_param(novmcoredd, vmcoredd_disabled, bool, 0);
602724273eSRahul Lakkireddy #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
612724273eSRahul Lakkireddy 
627efe48dfSRahul Lakkireddy /* Device Dump Size */
637efe48dfSRahul Lakkireddy static size_t vmcoredd_orig_sz;
647efe48dfSRahul Lakkireddy 
655039b170SDavid Hildenbrand static DEFINE_SPINLOCK(vmcore_cb_lock);
665039b170SDavid Hildenbrand DEFINE_STATIC_SRCU(vmcore_cb_srcu);
67cc5f2704SDavid Hildenbrand /* List of registered vmcore callbacks. */
68cc5f2704SDavid Hildenbrand static LIST_HEAD(vmcore_cb_list);
69cc5f2704SDavid Hildenbrand /* Whether the vmcore has been opened once. */
70cc5f2704SDavid Hildenbrand static bool vmcore_opened;
71cc5f2704SDavid Hildenbrand 
register_vmcore_cb(struct vmcore_cb * cb)72cc5f2704SDavid Hildenbrand void register_vmcore_cb(struct vmcore_cb *cb)
73cc5f2704SDavid Hildenbrand {
74cc5f2704SDavid Hildenbrand 	INIT_LIST_HEAD(&cb->next);
755039b170SDavid Hildenbrand 	spin_lock(&vmcore_cb_lock);
76cc5f2704SDavid Hildenbrand 	list_add_tail(&cb->next, &vmcore_cb_list);
77997c136fSOlaf Hering 	/*
78cc5f2704SDavid Hildenbrand 	 * Registering a vmcore callback after the vmcore was opened is
79cc5f2704SDavid Hildenbrand 	 * very unusual (e.g., manual driver loading).
80997c136fSOlaf Hering 	 */
81cc5f2704SDavid Hildenbrand 	if (vmcore_opened)
82cc5f2704SDavid Hildenbrand 		pr_warn_once("Unexpected vmcore callback registration\n");
835039b170SDavid Hildenbrand 	spin_unlock(&vmcore_cb_lock);
84997c136fSOlaf Hering }
85cc5f2704SDavid Hildenbrand EXPORT_SYMBOL_GPL(register_vmcore_cb);
86997c136fSOlaf Hering 
unregister_vmcore_cb(struct vmcore_cb * cb)87cc5f2704SDavid Hildenbrand void unregister_vmcore_cb(struct vmcore_cb *cb)
88997c136fSOlaf Hering {
895039b170SDavid Hildenbrand 	spin_lock(&vmcore_cb_lock);
905039b170SDavid Hildenbrand 	list_del_rcu(&cb->next);
91997c136fSOlaf Hering 	/*
92cc5f2704SDavid Hildenbrand 	 * Unregistering a vmcore callback after the vmcore was opened is
93cc5f2704SDavid Hildenbrand 	 * very unusual (e.g., forced driver removal), but we cannot stop
94cc5f2704SDavid Hildenbrand 	 * unregistering.
95997c136fSOlaf Hering 	 */
9625bc5b0dSDavid Hildenbrand 	if (vmcore_opened)
97cc5f2704SDavid Hildenbrand 		pr_warn_once("Unexpected vmcore callback unregistration\n");
985039b170SDavid Hildenbrand 	spin_unlock(&vmcore_cb_lock);
995039b170SDavid Hildenbrand 
1005039b170SDavid Hildenbrand 	synchronize_srcu(&vmcore_cb_srcu);
101cc5f2704SDavid Hildenbrand }
102cc5f2704SDavid Hildenbrand EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
103997c136fSOlaf Hering 
pfn_is_ram(unsigned long pfn)1042c9feeaeSDavid Hildenbrand static bool pfn_is_ram(unsigned long pfn)
105997c136fSOlaf Hering {
106cc5f2704SDavid Hildenbrand 	struct vmcore_cb *cb;
1072c9feeaeSDavid Hildenbrand 	bool ret = true;
108997c136fSOlaf Hering 
1095039b170SDavid Hildenbrand 	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
1105039b170SDavid Hildenbrand 				 srcu_read_lock_held(&vmcore_cb_srcu)) {
111cc5f2704SDavid Hildenbrand 		if (unlikely(!cb->pfn_is_ram))
112cc5f2704SDavid Hildenbrand 			continue;
113cc5f2704SDavid Hildenbrand 		ret = cb->pfn_is_ram(cb, pfn);
114cc5f2704SDavid Hildenbrand 		if (!ret)
115cc5f2704SDavid Hildenbrand 			break;
116cc5f2704SDavid Hildenbrand 	}
117997c136fSOlaf Hering 
118997c136fSOlaf Hering 	return ret;
119997c136fSOlaf Hering }
120997c136fSOlaf Hering 
open_vmcore(struct inode * inode,struct file * file)121cc5f2704SDavid Hildenbrand static int open_vmcore(struct inode *inode, struct file *file)
122cc5f2704SDavid Hildenbrand {
1235039b170SDavid Hildenbrand 	spin_lock(&vmcore_cb_lock);
124cc5f2704SDavid Hildenbrand 	vmcore_opened = true;
1255039b170SDavid Hildenbrand 	spin_unlock(&vmcore_cb_lock);
126cc5f2704SDavid Hildenbrand 
127cc5f2704SDavid Hildenbrand 	return 0;
128cc5f2704SDavid Hildenbrand }
129cc5f2704SDavid Hildenbrand 
130666bfddbSVivek Goyal /* Reads a page from the oldmem device from given offset. */
read_from_oldmem(struct iov_iter * iter,size_t count,u64 * ppos,bool encrypted)131e0690479SMatthew Wilcox (Oracle) ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
1325d8de293SMatthew Wilcox (Oracle) 			 u64 *ppos, bool encrypted)
133666bfddbSVivek Goyal {
134666bfddbSVivek Goyal 	unsigned long pfn, offset;
135641db40fSDan Carpenter 	ssize_t nr_bytes;
136666bfddbSVivek Goyal 	ssize_t read = 0, tmp;
1375039b170SDavid Hildenbrand 	int idx;
138666bfddbSVivek Goyal 
139666bfddbSVivek Goyal 	if (!count)
140666bfddbSVivek Goyal 		return 0;
141666bfddbSVivek Goyal 
142666bfddbSVivek Goyal 	offset = (unsigned long)(*ppos % PAGE_SIZE);
143666bfddbSVivek Goyal 	pfn = (unsigned long)(*ppos / PAGE_SIZE);
144666bfddbSVivek Goyal 
1455039b170SDavid Hildenbrand 	idx = srcu_read_lock(&vmcore_cb_srcu);
146666bfddbSVivek Goyal 	do {
147666bfddbSVivek Goyal 		if (count > (PAGE_SIZE - offset))
148666bfddbSVivek Goyal 			nr_bytes = PAGE_SIZE - offset;
149666bfddbSVivek Goyal 		else
150666bfddbSVivek Goyal 			nr_bytes = count;
151666bfddbSVivek Goyal 
152997c136fSOlaf Hering 		/* If pfn is not ram, return zeros for sparse dump files */
153c1e63117SDavid Hildenbrand 		if (!pfn_is_ram(pfn)) {
1545d8de293SMatthew Wilcox (Oracle) 			tmp = iov_iter_zero(nr_bytes, iter);
155c1e63117SDavid Hildenbrand 		} else {
156992b649aSLianbo Jiang 			if (encrypted)
1575d8de293SMatthew Wilcox (Oracle) 				tmp = copy_oldmem_page_encrypted(iter, pfn,
158992b649aSLianbo Jiang 								 nr_bytes,
1595d8de293SMatthew Wilcox (Oracle) 								 offset);
160992b649aSLianbo Jiang 			else
1615d8de293SMatthew Wilcox (Oracle) 				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
1625d8de293SMatthew Wilcox (Oracle) 						       offset);
163c1e63117SDavid Hildenbrand 		}
1645d8de293SMatthew Wilcox (Oracle) 		if (tmp < nr_bytes) {
1655039b170SDavid Hildenbrand 			srcu_read_unlock(&vmcore_cb_srcu, idx);
1665d8de293SMatthew Wilcox (Oracle) 			return -EFAULT;
167997c136fSOlaf Hering 		}
168c1e63117SDavid Hildenbrand 
169666bfddbSVivek Goyal 		*ppos += nr_bytes;
170666bfddbSVivek Goyal 		count -= nr_bytes;
171666bfddbSVivek Goyal 		read += nr_bytes;
172666bfddbSVivek Goyal 		++pfn;
173666bfddbSVivek Goyal 		offset = 0;
174666bfddbSVivek Goyal 	} while (count);
1755039b170SDavid Hildenbrand 	srcu_read_unlock(&vmcore_cb_srcu, idx);
176666bfddbSVivek Goyal 
177666bfddbSVivek Goyal 	return read;
178666bfddbSVivek Goyal }
179666bfddbSVivek Goyal 
180be8a8d06SMichael Holzheu /*
181be8a8d06SMichael Holzheu  * Architectures may override this function to allocate ELF header in 2nd kernel
182be8a8d06SMichael Holzheu  */
elfcorehdr_alloc(unsigned long long * addr,unsigned long long * size)183be8a8d06SMichael Holzheu int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
184be8a8d06SMichael Holzheu {
185be8a8d06SMichael Holzheu 	return 0;
186be8a8d06SMichael Holzheu }
187be8a8d06SMichael Holzheu 
188be8a8d06SMichael Holzheu /*
189be8a8d06SMichael Holzheu  * Architectures may override this function to free header
190be8a8d06SMichael Holzheu  */
elfcorehdr_free(unsigned long long addr)191be8a8d06SMichael Holzheu void __weak elfcorehdr_free(unsigned long long addr)
192be8a8d06SMichael Holzheu {}
193be8a8d06SMichael Holzheu 
194be8a8d06SMichael Holzheu /*
195be8a8d06SMichael Holzheu  * Architectures may override this function to read from ELF header
196be8a8d06SMichael Holzheu  */
elfcorehdr_read(char * buf,size_t count,u64 * ppos)197be8a8d06SMichael Holzheu ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
198be8a8d06SMichael Holzheu {
199e0690479SMatthew Wilcox (Oracle) 	struct kvec kvec = { .iov_base = buf, .iov_len = count };
200e0690479SMatthew Wilcox (Oracle) 	struct iov_iter iter;
201e0690479SMatthew Wilcox (Oracle) 
202de4eda9dSAl Viro 	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
203e0690479SMatthew Wilcox (Oracle) 
204e0690479SMatthew Wilcox (Oracle) 	return read_from_oldmem(&iter, count, ppos, false);
205be8a8d06SMichael Holzheu }
206be8a8d06SMichael Holzheu 
207be8a8d06SMichael Holzheu /*
208be8a8d06SMichael Holzheu  * Architectures may override this function to read from notes sections
209be8a8d06SMichael Holzheu  */
elfcorehdr_read_notes(char * buf,size_t count,u64 * ppos)210be8a8d06SMichael Holzheu ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
211be8a8d06SMichael Holzheu {
212e0690479SMatthew Wilcox (Oracle) 	struct kvec kvec = { .iov_base = buf, .iov_len = count };
213e0690479SMatthew Wilcox (Oracle) 	struct iov_iter iter;
214e0690479SMatthew Wilcox (Oracle) 
215de4eda9dSAl Viro 	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
216e0690479SMatthew Wilcox (Oracle) 
217e0690479SMatthew Wilcox (Oracle) 	return read_from_oldmem(&iter, count, ppos,
218e0690479SMatthew Wilcox (Oracle) 			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
219be8a8d06SMichael Holzheu }
220be8a8d06SMichael Holzheu 
2219cb21813SMichael Holzheu /*
2229cb21813SMichael Holzheu  * Architectures may override this function to map oldmem
2239cb21813SMichael Holzheu  */
remap_oldmem_pfn_range(struct vm_area_struct * vma,unsigned long from,unsigned long pfn,unsigned long size,pgprot_t prot)2249cb21813SMichael Holzheu int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
2259cb21813SMichael Holzheu 				  unsigned long from, unsigned long pfn,
2269cb21813SMichael Holzheu 				  unsigned long size, pgprot_t prot)
2279cb21813SMichael Holzheu {
228992b649aSLianbo Jiang 	prot = pgprot_encrypted(prot);
2299cb21813SMichael Holzheu 	return remap_pfn_range(vma, from, pfn, size, prot);
2309cb21813SMichael Holzheu }
2319cb21813SMichael Holzheu 
2329cb21813SMichael Holzheu /*
233cf089611SBorislav Petkov  * Architectures which support memory encryption override this.
234cf089611SBorislav Petkov  */
copy_oldmem_page_encrypted(struct iov_iter * iter,unsigned long pfn,size_t csize,unsigned long offset)2355d8de293SMatthew Wilcox (Oracle) ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
2365d8de293SMatthew Wilcox (Oracle) 		unsigned long pfn, size_t csize, unsigned long offset)
237cf089611SBorislav Petkov {
2385d8de293SMatthew Wilcox (Oracle) 	return copy_oldmem_page(iter, pfn, csize, offset);
239cf089611SBorislav Petkov }
240cf089611SBorislav Petkov 
2417efe48dfSRahul Lakkireddy #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
vmcoredd_copy_dumps(struct iov_iter * iter,u64 start,size_t size)2424a22fd20SMatthew Wilcox (Oracle) static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
2437efe48dfSRahul Lakkireddy {
2447efe48dfSRahul Lakkireddy 	struct vmcoredd_node *dump;
2457efe48dfSRahul Lakkireddy 	u64 offset = 0;
2467efe48dfSRahul Lakkireddy 	int ret = 0;
2477efe48dfSRahul Lakkireddy 	size_t tsz;
2487efe48dfSRahul Lakkireddy 	char *buf;
2497efe48dfSRahul Lakkireddy 
2507efe48dfSRahul Lakkireddy 	mutex_lock(&vmcoredd_mutex);
2517efe48dfSRahul Lakkireddy 	list_for_each_entry(dump, &vmcoredd_list, list) {
2527efe48dfSRahul Lakkireddy 		if (start < offset + dump->size) {
2537efe48dfSRahul Lakkireddy 			tsz = min(offset + (u64)dump->size - start, (u64)size);
2547efe48dfSRahul Lakkireddy 			buf = dump->buf + start - offset;
2554a22fd20SMatthew Wilcox (Oracle) 			if (copy_to_iter(buf, tsz, iter) < tsz) {
2567efe48dfSRahul Lakkireddy 				ret = -EFAULT;
2577efe48dfSRahul Lakkireddy 				goto out_unlock;
2587efe48dfSRahul Lakkireddy 			}
2597efe48dfSRahul Lakkireddy 
2607efe48dfSRahul Lakkireddy 			size -= tsz;
2617efe48dfSRahul Lakkireddy 			start += tsz;
2627efe48dfSRahul Lakkireddy 
2637efe48dfSRahul Lakkireddy 			/* Leave now if buffer filled already */
2647efe48dfSRahul Lakkireddy 			if (!size)
2657efe48dfSRahul Lakkireddy 				goto out_unlock;
2667efe48dfSRahul Lakkireddy 		}
2677efe48dfSRahul Lakkireddy 		offset += dump->size;
2687efe48dfSRahul Lakkireddy 	}
2697efe48dfSRahul Lakkireddy 
2707efe48dfSRahul Lakkireddy out_unlock:
2717efe48dfSRahul Lakkireddy 	mutex_unlock(&vmcoredd_mutex);
2727efe48dfSRahul Lakkireddy 	return ret;
2737efe48dfSRahul Lakkireddy }
2747efe48dfSRahul Lakkireddy 
275a2036a1eSArnd Bergmann #ifdef CONFIG_MMU
vmcoredd_mmap_dumps(struct vm_area_struct * vma,unsigned long dst,u64 start,size_t size)2767efe48dfSRahul Lakkireddy static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
2777efe48dfSRahul Lakkireddy 			       u64 start, size_t size)
2787efe48dfSRahul Lakkireddy {
2797efe48dfSRahul Lakkireddy 	struct vmcoredd_node *dump;
2807efe48dfSRahul Lakkireddy 	u64 offset = 0;
2817efe48dfSRahul Lakkireddy 	int ret = 0;
2827efe48dfSRahul Lakkireddy 	size_t tsz;
2837efe48dfSRahul Lakkireddy 	char *buf;
2847efe48dfSRahul Lakkireddy 
2857efe48dfSRahul Lakkireddy 	mutex_lock(&vmcoredd_mutex);
2867efe48dfSRahul Lakkireddy 	list_for_each_entry(dump, &vmcoredd_list, list) {
2877efe48dfSRahul Lakkireddy 		if (start < offset + dump->size) {
2887efe48dfSRahul Lakkireddy 			tsz = min(offset + (u64)dump->size - start, (u64)size);
2897efe48dfSRahul Lakkireddy 			buf = dump->buf + start - offset;
290bdebd6a2SJann Horn 			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
291bdebd6a2SJann Horn 							tsz)) {
2927efe48dfSRahul Lakkireddy 				ret = -EFAULT;
2937efe48dfSRahul Lakkireddy 				goto out_unlock;
2947efe48dfSRahul Lakkireddy 			}
2957efe48dfSRahul Lakkireddy 
2967efe48dfSRahul Lakkireddy 			size -= tsz;
2977efe48dfSRahul Lakkireddy 			start += tsz;
2987efe48dfSRahul Lakkireddy 			dst += tsz;
2997efe48dfSRahul Lakkireddy 
3007efe48dfSRahul Lakkireddy 			/* Leave now if buffer filled already */
3017efe48dfSRahul Lakkireddy 			if (!size)
3027efe48dfSRahul Lakkireddy 				goto out_unlock;
3037efe48dfSRahul Lakkireddy 		}
3047efe48dfSRahul Lakkireddy 		offset += dump->size;
3057efe48dfSRahul Lakkireddy 	}
3067efe48dfSRahul Lakkireddy 
3077efe48dfSRahul Lakkireddy out_unlock:
3087efe48dfSRahul Lakkireddy 	mutex_unlock(&vmcoredd_mutex);
3097efe48dfSRahul Lakkireddy 	return ret;
3107efe48dfSRahul Lakkireddy }
311a2036a1eSArnd Bergmann #endif /* CONFIG_MMU */
3127efe48dfSRahul Lakkireddy #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
3137efe48dfSRahul Lakkireddy 
314666bfddbSVivek Goyal /* Read from the ELF header and then the crash dump. On error, negative value is
315666bfddbSVivek Goyal  * returned otherwise number of bytes read are returned.
316666bfddbSVivek Goyal  */
__read_vmcore(struct iov_iter * iter,loff_t * fpos)3174a22fd20SMatthew Wilcox (Oracle) static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
318666bfddbSVivek Goyal {
319666bfddbSVivek Goyal 	ssize_t acc = 0, tmp;
32080e8ff63SVivek Goyal 	size_t tsz;
321b27eb186SHATAYAMA Daisuke 	u64 start;
322b27eb186SHATAYAMA Daisuke 	struct vmcore *m = NULL;
323666bfddbSVivek Goyal 
3244a22fd20SMatthew Wilcox (Oracle) 	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
325666bfddbSVivek Goyal 		return 0;
326666bfddbSVivek Goyal 
3274a22fd20SMatthew Wilcox (Oracle) 	iov_iter_truncate(iter, vmcore_size - *fpos);
328666bfddbSVivek Goyal 
329666bfddbSVivek Goyal 	/* Read ELF core header */
330666bfddbSVivek Goyal 	if (*fpos < elfcorebuf_sz) {
3314a22fd20SMatthew Wilcox (Oracle) 		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
3324a22fd20SMatthew Wilcox (Oracle) 		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
333666bfddbSVivek Goyal 			return -EFAULT;
334666bfddbSVivek Goyal 		*fpos += tsz;
335666bfddbSVivek Goyal 		acc += tsz;
336666bfddbSVivek Goyal 
337666bfddbSVivek Goyal 		/* leave now if filled buffer already */
3384a22fd20SMatthew Wilcox (Oracle) 		if (!iov_iter_count(iter))
339666bfddbSVivek Goyal 			return acc;
340666bfddbSVivek Goyal 	}
341666bfddbSVivek Goyal 
34270e79866SAlexey Dobriyan 	/* Read ELF note segment */
343087350c9SHATAYAMA Daisuke 	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
344087350c9SHATAYAMA Daisuke 		void *kaddr;
345087350c9SHATAYAMA Daisuke 
3467efe48dfSRahul Lakkireddy 		/* We add device dumps before other elf notes because the
3477efe48dfSRahul Lakkireddy 		 * other elf notes may not fill the elf notes buffer
3487efe48dfSRahul Lakkireddy 		 * completely and we will end up with zero-filled data
3497efe48dfSRahul Lakkireddy 		 * between the elf notes and the device dumps. Tools will
3507efe48dfSRahul Lakkireddy 		 * then try to decode this zero-filled data as valid notes
3517efe48dfSRahul Lakkireddy 		 * and we don't want that. Hence, adding device dumps before
3527efe48dfSRahul Lakkireddy 		 * the other elf notes ensure that zero-filled data can be
3537efe48dfSRahul Lakkireddy 		 * avoided.
3547efe48dfSRahul Lakkireddy 		 */
3557efe48dfSRahul Lakkireddy #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
3567efe48dfSRahul Lakkireddy 		/* Read device dumps */
3577efe48dfSRahul Lakkireddy 		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
3587efe48dfSRahul Lakkireddy 			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
3594a22fd20SMatthew Wilcox (Oracle) 				  (size_t)*fpos, iov_iter_count(iter));
3607efe48dfSRahul Lakkireddy 			start = *fpos - elfcorebuf_sz;
3614a22fd20SMatthew Wilcox (Oracle) 			if (vmcoredd_copy_dumps(iter, start, tsz))
3627efe48dfSRahul Lakkireddy 				return -EFAULT;
3637efe48dfSRahul Lakkireddy 
3647efe48dfSRahul Lakkireddy 			*fpos += tsz;
3657efe48dfSRahul Lakkireddy 			acc += tsz;
3667efe48dfSRahul Lakkireddy 
3677efe48dfSRahul Lakkireddy 			/* leave now if filled buffer already */
3684a22fd20SMatthew Wilcox (Oracle) 			if (!iov_iter_count(iter))
3697efe48dfSRahul Lakkireddy 				return acc;
3707efe48dfSRahul Lakkireddy 		}
3717efe48dfSRahul Lakkireddy #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
3727efe48dfSRahul Lakkireddy 
3737efe48dfSRahul Lakkireddy 		/* Read remaining elf notes */
3744a22fd20SMatthew Wilcox (Oracle) 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
3754a22fd20SMatthew Wilcox (Oracle) 			  iov_iter_count(iter));
3767efe48dfSRahul Lakkireddy 		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
3774a22fd20SMatthew Wilcox (Oracle) 		if (copy_to_iter(kaddr, tsz, iter) < tsz)
378087350c9SHATAYAMA Daisuke 			return -EFAULT;
3797efe48dfSRahul Lakkireddy 
380087350c9SHATAYAMA Daisuke 		*fpos += tsz;
381087350c9SHATAYAMA Daisuke 		acc += tsz;
382087350c9SHATAYAMA Daisuke 
383087350c9SHATAYAMA Daisuke 		/* leave now if filled buffer already */
3844a22fd20SMatthew Wilcox (Oracle) 		if (!iov_iter_count(iter))
385087350c9SHATAYAMA Daisuke 			return acc;
386518fbd64SRik van Riel 
387518fbd64SRik van Riel 		cond_resched();
388087350c9SHATAYAMA Daisuke 	}
389087350c9SHATAYAMA Daisuke 
390b27eb186SHATAYAMA Daisuke 	list_for_each_entry(m, &vmcore_list, list) {
391b27eb186SHATAYAMA Daisuke 		if (*fpos < m->offset + m->size) {
3920b50a2d8SDave Young 			tsz = (size_t)min_t(unsigned long long,
3930b50a2d8SDave Young 					    m->offset + m->size - *fpos,
3944a22fd20SMatthew Wilcox (Oracle) 					    iov_iter_count(iter));
395b27eb186SHATAYAMA Daisuke 			start = m->paddr + *fpos - m->offset;
396e0690479SMatthew Wilcox (Oracle) 			tmp = read_from_oldmem(iter, tsz, &start,
3974a22fd20SMatthew Wilcox (Oracle) 					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
398666bfddbSVivek Goyal 			if (tmp < 0)
399666bfddbSVivek Goyal 				return tmp;
400666bfddbSVivek Goyal 			*fpos += tsz;
401666bfddbSVivek Goyal 			acc += tsz;
402b27eb186SHATAYAMA Daisuke 
403b27eb186SHATAYAMA Daisuke 			/* leave now if filled buffer already */
4044a22fd20SMatthew Wilcox (Oracle) 			if (!iov_iter_count(iter))
405b27eb186SHATAYAMA Daisuke 				return acc;
406666bfddbSVivek Goyal 		}
407*a5a2ee81SRik van Riel 
408*a5a2ee81SRik van Riel 		cond_resched();
409666bfddbSVivek Goyal 	}
410b27eb186SHATAYAMA Daisuke 
411666bfddbSVivek Goyal 	return acc;
412666bfddbSVivek Goyal }
413666bfddbSVivek Goyal 
read_vmcore(struct kiocb * iocb,struct iov_iter * iter)4144a22fd20SMatthew Wilcox (Oracle) static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
4159cb21813SMichael Holzheu {
4164a22fd20SMatthew Wilcox (Oracle) 	return __read_vmcore(iter, &iocb->ki_pos);
4179cb21813SMichael Holzheu }
4189cb21813SMichael Holzheu 
4199cb21813SMichael Holzheu /*
4209cb21813SMichael Holzheu  * The vmcore fault handler uses the page cache and fills data using the
4214a22fd20SMatthew Wilcox (Oracle)  * standard __read_vmcore() function.
4229cb21813SMichael Holzheu  *
4239cb21813SMichael Holzheu  * On s390 the fault handler is used for memory regions that can't be mapped
4249cb21813SMichael Holzheu  * directly with remap_pfn_range().
4259cb21813SMichael Holzheu  */
mmap_vmcore_fault(struct vm_fault * vmf)42636f06204SSouptick Joarder static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
4279cb21813SMichael Holzheu {
4289cb21813SMichael Holzheu #ifdef CONFIG_S390
42911bac800SDave Jiang 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
4309cb21813SMichael Holzheu 	pgoff_t index = vmf->pgoff;
4314a22fd20SMatthew Wilcox (Oracle) 	struct iov_iter iter;
4324a22fd20SMatthew Wilcox (Oracle) 	struct kvec kvec;
4339cb21813SMichael Holzheu 	struct page *page;
4349cb21813SMichael Holzheu 	loff_t offset;
4359cb21813SMichael Holzheu 	int rc;
4369cb21813SMichael Holzheu 
4379cb21813SMichael Holzheu 	page = find_or_create_page(mapping, index, GFP_KERNEL);
4389cb21813SMichael Holzheu 	if (!page)
4399cb21813SMichael Holzheu 		return VM_FAULT_OOM;
4409cb21813SMichael Holzheu 	if (!PageUptodate(page)) {
44109cbfeafSKirill A. Shutemov 		offset = (loff_t) index << PAGE_SHIFT;
4424a22fd20SMatthew Wilcox (Oracle) 		kvec.iov_base = page_address(page);
4434a22fd20SMatthew Wilcox (Oracle) 		kvec.iov_len = PAGE_SIZE;
444de4eda9dSAl Viro 		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
4454a22fd20SMatthew Wilcox (Oracle) 
4464a22fd20SMatthew Wilcox (Oracle) 		rc = __read_vmcore(&iter, &offset);
4479cb21813SMichael Holzheu 		if (rc < 0) {
4489cb21813SMichael Holzheu 			unlock_page(page);
44909cbfeafSKirill A. Shutemov 			put_page(page);
450b5c21237SSouptick Joarder 			return vmf_error(rc);
4519cb21813SMichael Holzheu 		}
4529cb21813SMichael Holzheu 		SetPageUptodate(page);
4539cb21813SMichael Holzheu 	}
4549cb21813SMichael Holzheu 	unlock_page(page);
4559cb21813SMichael Holzheu 	vmf->page = page;
4569cb21813SMichael Holzheu 	return 0;
4579cb21813SMichael Holzheu #else
4589cb21813SMichael Holzheu 	return VM_FAULT_SIGBUS;
4599cb21813SMichael Holzheu #endif
4609cb21813SMichael Holzheu }
4619cb21813SMichael Holzheu 
46283086978SHATAYAMA Daisuke /**
4632724273eSRahul Lakkireddy  * vmcore_alloc_buf - allocate buffer in vmalloc memory
464e9f5d101SYang Li  * @size: size of buffer
46583086978SHATAYAMA Daisuke  *
46683086978SHATAYAMA Daisuke  * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
46783086978SHATAYAMA Daisuke  * the buffer to user-space by means of remap_vmalloc_range().
46883086978SHATAYAMA Daisuke  *
46983086978SHATAYAMA Daisuke  * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
47083086978SHATAYAMA Daisuke  * disabled and there's no need to allow users to mmap the buffer.
47183086978SHATAYAMA Daisuke  */
vmcore_alloc_buf(size_t size)4722724273eSRahul Lakkireddy static inline char *vmcore_alloc_buf(size_t size)
47383086978SHATAYAMA Daisuke {
47483086978SHATAYAMA Daisuke #ifdef CONFIG_MMU
4752724273eSRahul Lakkireddy 	return vmalloc_user(size);
47683086978SHATAYAMA Daisuke #else
4772724273eSRahul Lakkireddy 	return vzalloc(size);
47883086978SHATAYAMA Daisuke #endif
47983086978SHATAYAMA Daisuke }
48083086978SHATAYAMA Daisuke 
48183086978SHATAYAMA Daisuke /*
48283086978SHATAYAMA Daisuke  * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
48383086978SHATAYAMA Daisuke  * essential for mmap_vmcore() in order to map physically
48483086978SHATAYAMA Daisuke  * non-contiguous objects (ELF header, ELF note segment and memory
48583086978SHATAYAMA Daisuke  * regions in the 1st kernel pointed to by PT_LOAD entries) into
48683086978SHATAYAMA Daisuke  * virtually contiguous user-space in ELF layout.
48783086978SHATAYAMA Daisuke  */
48811e376a3SMichael Holzheu #ifdef CONFIG_MMU
4895b548fd0SQi Xi 
4905b548fd0SQi Xi static const struct vm_operations_struct vmcore_mmap_ops = {
4915b548fd0SQi Xi 	.fault = mmap_vmcore_fault,
4925b548fd0SQi Xi };
4935b548fd0SQi Xi 
4940692dedcSVitaly Kuznetsov /*
4950692dedcSVitaly Kuznetsov  * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
4960692dedcSVitaly Kuznetsov  * reported as not being ram with the zero page.
4970692dedcSVitaly Kuznetsov  *
4980692dedcSVitaly Kuznetsov  * @vma: vm_area_struct describing requested mapping
4990692dedcSVitaly Kuznetsov  * @from: start remapping from
5000692dedcSVitaly Kuznetsov  * @pfn: page frame number to start remapping to
5010692dedcSVitaly Kuznetsov  * @size: remapping size
5020692dedcSVitaly Kuznetsov  * @prot: protection bits
5030692dedcSVitaly Kuznetsov  *
5040692dedcSVitaly Kuznetsov  * Returns zero on success, -EAGAIN on failure.
5050692dedcSVitaly Kuznetsov  */
remap_oldmem_pfn_checked(struct vm_area_struct * vma,unsigned long from,unsigned long pfn,unsigned long size,pgprot_t prot)5060692dedcSVitaly Kuznetsov static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
5070692dedcSVitaly Kuznetsov 				    unsigned long from, unsigned long pfn,
5080692dedcSVitaly Kuznetsov 				    unsigned long size, pgprot_t prot)
5090692dedcSVitaly Kuznetsov {
5100692dedcSVitaly Kuznetsov 	unsigned long map_size;
5110692dedcSVitaly Kuznetsov 	unsigned long pos_start, pos_end, pos;
5120692dedcSVitaly Kuznetsov 	unsigned long zeropage_pfn = my_zero_pfn(0);
5130692dedcSVitaly Kuznetsov 	size_t len = 0;
5140692dedcSVitaly Kuznetsov 
5150692dedcSVitaly Kuznetsov 	pos_start = pfn;
5160692dedcSVitaly Kuznetsov 	pos_end = pfn + (size >> PAGE_SHIFT);
5170692dedcSVitaly Kuznetsov 
5180692dedcSVitaly Kuznetsov 	for (pos = pos_start; pos < pos_end; ++pos) {
5190692dedcSVitaly Kuznetsov 		if (!pfn_is_ram(pos)) {
5200692dedcSVitaly Kuznetsov 			/*
5210692dedcSVitaly Kuznetsov 			 * We hit a page which is not ram. Remap the continuous
5220692dedcSVitaly Kuznetsov 			 * region between pos_start and pos-1 and replace
5230692dedcSVitaly Kuznetsov 			 * the non-ram page at pos with the zero page.
5240692dedcSVitaly Kuznetsov 			 */
5250692dedcSVitaly Kuznetsov 			if (pos > pos_start) {
5260692dedcSVitaly Kuznetsov 				/* Remap continuous region */
5270692dedcSVitaly Kuznetsov 				map_size = (pos - pos_start) << PAGE_SHIFT;
5280692dedcSVitaly Kuznetsov 				if (remap_oldmem_pfn_range(vma, from + len,
5290692dedcSVitaly Kuznetsov 							   pos_start, map_size,
5300692dedcSVitaly Kuznetsov 							   prot))
5310692dedcSVitaly Kuznetsov 					goto fail;
5320692dedcSVitaly Kuznetsov 				len += map_size;
5330692dedcSVitaly Kuznetsov 			}
5340692dedcSVitaly Kuznetsov 			/* Remap the zero page */
5350692dedcSVitaly Kuznetsov 			if (remap_oldmem_pfn_range(vma, from + len,
5360692dedcSVitaly Kuznetsov 						   zeropage_pfn,
5370692dedcSVitaly Kuznetsov 						   PAGE_SIZE, prot))
5380692dedcSVitaly Kuznetsov 				goto fail;
5390692dedcSVitaly Kuznetsov 			len += PAGE_SIZE;
5400692dedcSVitaly Kuznetsov 			pos_start = pos + 1;
5410692dedcSVitaly Kuznetsov 		}
5420692dedcSVitaly Kuznetsov 	}
5430692dedcSVitaly Kuznetsov 	if (pos > pos_start) {
5440692dedcSVitaly Kuznetsov 		/* Remap the rest */
5450692dedcSVitaly Kuznetsov 		map_size = (pos - pos_start) << PAGE_SHIFT;
5460692dedcSVitaly Kuznetsov 		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
5470692dedcSVitaly Kuznetsov 					   map_size, prot))
5480692dedcSVitaly Kuznetsov 			goto fail;
5490692dedcSVitaly Kuznetsov 	}
5500692dedcSVitaly Kuznetsov 	return 0;
5510692dedcSVitaly Kuznetsov fail:
552897ab3e0SMike Rapoport 	do_munmap(vma->vm_mm, from, len, NULL);
5530692dedcSVitaly Kuznetsov 	return -EAGAIN;
5540692dedcSVitaly Kuznetsov }
5550692dedcSVitaly Kuznetsov 
vmcore_remap_oldmem_pfn(struct vm_area_struct * vma,unsigned long from,unsigned long pfn,unsigned long size,pgprot_t prot)5560692dedcSVitaly Kuznetsov static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
5570692dedcSVitaly Kuznetsov 			    unsigned long from, unsigned long pfn,
5580692dedcSVitaly Kuznetsov 			    unsigned long size, pgprot_t prot)
5590692dedcSVitaly Kuznetsov {
5605039b170SDavid Hildenbrand 	int ret, idx;
561cc5f2704SDavid Hildenbrand 
5620692dedcSVitaly Kuznetsov 	/*
5635039b170SDavid Hildenbrand 	 * Check if a callback was registered to avoid looping over all
5645039b170SDavid Hildenbrand 	 * pages without a reason.
5650692dedcSVitaly Kuznetsov 	 */
5665039b170SDavid Hildenbrand 	idx = srcu_read_lock(&vmcore_cb_srcu);
56725bc5b0dSDavid Hildenbrand 	if (!list_empty(&vmcore_cb_list))
568cc5f2704SDavid Hildenbrand 		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
5690692dedcSVitaly Kuznetsov 	else
570cc5f2704SDavid Hildenbrand 		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
5715039b170SDavid Hildenbrand 	srcu_read_unlock(&vmcore_cb_srcu, idx);
572cc5f2704SDavid Hildenbrand 	return ret;
5730692dedcSVitaly Kuznetsov }
5740692dedcSVitaly Kuznetsov 
mmap_vmcore(struct file * file,struct vm_area_struct * vma)57583086978SHATAYAMA Daisuke static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
57683086978SHATAYAMA Daisuke {
57783086978SHATAYAMA Daisuke 	size_t size = vma->vm_end - vma->vm_start;
57883086978SHATAYAMA Daisuke 	u64 start, end, len, tsz;
57983086978SHATAYAMA Daisuke 	struct vmcore *m;
58083086978SHATAYAMA Daisuke 
58183086978SHATAYAMA Daisuke 	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
58283086978SHATAYAMA Daisuke 	end = start + size;
58383086978SHATAYAMA Daisuke 
58483086978SHATAYAMA Daisuke 	if (size > vmcore_size || end > vmcore_size)
58583086978SHATAYAMA Daisuke 		return -EINVAL;
58683086978SHATAYAMA Daisuke 
58783086978SHATAYAMA Daisuke 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
58883086978SHATAYAMA Daisuke 		return -EPERM;
58983086978SHATAYAMA Daisuke 
5901c71222eSSuren Baghdasaryan 	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
5919cb21813SMichael Holzheu 	vma->vm_ops = &vmcore_mmap_ops;
59283086978SHATAYAMA Daisuke 
59383086978SHATAYAMA Daisuke 	len = 0;
59483086978SHATAYAMA Daisuke 
59583086978SHATAYAMA Daisuke 	if (start < elfcorebuf_sz) {
59683086978SHATAYAMA Daisuke 		u64 pfn;
59783086978SHATAYAMA Daisuke 
59883086978SHATAYAMA Daisuke 		tsz = min(elfcorebuf_sz - (size_t)start, size);
59983086978SHATAYAMA Daisuke 		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
60083086978SHATAYAMA Daisuke 		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
60183086978SHATAYAMA Daisuke 				    vma->vm_page_prot))
60283086978SHATAYAMA Daisuke 			return -EAGAIN;
60383086978SHATAYAMA Daisuke 		size -= tsz;
60483086978SHATAYAMA Daisuke 		start += tsz;
60583086978SHATAYAMA Daisuke 		len += tsz;
60683086978SHATAYAMA Daisuke 
60783086978SHATAYAMA Daisuke 		if (size == 0)
60883086978SHATAYAMA Daisuke 			return 0;
60983086978SHATAYAMA Daisuke 	}
61083086978SHATAYAMA Daisuke 
61183086978SHATAYAMA Daisuke 	if (start < elfcorebuf_sz + elfnotes_sz) {
61283086978SHATAYAMA Daisuke 		void *kaddr;
61383086978SHATAYAMA Daisuke 
6147efe48dfSRahul Lakkireddy 		/* We add device dumps before other elf notes because the
6157efe48dfSRahul Lakkireddy 		 * other elf notes may not fill the elf notes buffer
6167efe48dfSRahul Lakkireddy 		 * completely and we will end up with zero-filled data
6177efe48dfSRahul Lakkireddy 		 * between the elf notes and the device dumps. Tools will
6187efe48dfSRahul Lakkireddy 		 * then try to decode this zero-filled data as valid notes
6197efe48dfSRahul Lakkireddy 		 * and we don't want that. Hence, adding device dumps before
6207efe48dfSRahul Lakkireddy 		 * the other elf notes ensure that zero-filled data can be
6217efe48dfSRahul Lakkireddy 		 * avoided. This also ensures that the device dumps and
6227efe48dfSRahul Lakkireddy 		 * other elf notes can be properly mmaped at page aligned
6237efe48dfSRahul Lakkireddy 		 * address.
6247efe48dfSRahul Lakkireddy 		 */
6257efe48dfSRahul Lakkireddy #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
6267efe48dfSRahul Lakkireddy 		/* Read device dumps */
6277efe48dfSRahul Lakkireddy 		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
6287efe48dfSRahul Lakkireddy 			u64 start_off;
6297efe48dfSRahul Lakkireddy 
6307efe48dfSRahul Lakkireddy 			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
6317efe48dfSRahul Lakkireddy 				  (size_t)start, size);
6327efe48dfSRahul Lakkireddy 			start_off = start - elfcorebuf_sz;
6337efe48dfSRahul Lakkireddy 			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
6347efe48dfSRahul Lakkireddy 						start_off, tsz))
6357efe48dfSRahul Lakkireddy 				goto fail;
6367efe48dfSRahul Lakkireddy 
6377efe48dfSRahul Lakkireddy 			size -= tsz;
6387efe48dfSRahul Lakkireddy 			start += tsz;
6397efe48dfSRahul Lakkireddy 			len += tsz;
6407efe48dfSRahul Lakkireddy 
6417efe48dfSRahul Lakkireddy 			/* leave now if filled buffer already */
6427efe48dfSRahul Lakkireddy 			if (!size)
6437efe48dfSRahul Lakkireddy 				return 0;
6447efe48dfSRahul Lakkireddy 		}
6457efe48dfSRahul Lakkireddy #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
6467efe48dfSRahul Lakkireddy 
6477efe48dfSRahul Lakkireddy 		/* Read remaining elf notes */
64883086978SHATAYAMA Daisuke 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
6497efe48dfSRahul Lakkireddy 		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
65083086978SHATAYAMA Daisuke 		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
651bdebd6a2SJann Horn 						kaddr, 0, tsz))
65283086978SHATAYAMA Daisuke 			goto fail;
6537efe48dfSRahul Lakkireddy 
65483086978SHATAYAMA Daisuke 		size -= tsz;
65583086978SHATAYAMA Daisuke 		start += tsz;
65683086978SHATAYAMA Daisuke 		len += tsz;
65783086978SHATAYAMA Daisuke 
65883086978SHATAYAMA Daisuke 		if (size == 0)
65983086978SHATAYAMA Daisuke 			return 0;
66083086978SHATAYAMA Daisuke 	}
66183086978SHATAYAMA Daisuke 
66283086978SHATAYAMA Daisuke 	list_for_each_entry(m, &vmcore_list, list) {
66383086978SHATAYAMA Daisuke 		if (start < m->offset + m->size) {
66483086978SHATAYAMA Daisuke 			u64 paddr = 0;
66583086978SHATAYAMA Daisuke 
6660b50a2d8SDave Young 			tsz = (size_t)min_t(unsigned long long,
6670b50a2d8SDave Young 					    m->offset + m->size - start, size);
66883086978SHATAYAMA Daisuke 			paddr = m->paddr + start - m->offset;
6690692dedcSVitaly Kuznetsov 			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
67083086978SHATAYAMA Daisuke 						    paddr >> PAGE_SHIFT, tsz,
67183086978SHATAYAMA Daisuke 						    vma->vm_page_prot))
67283086978SHATAYAMA Daisuke 				goto fail;
67383086978SHATAYAMA Daisuke 			size -= tsz;
67483086978SHATAYAMA Daisuke 			start += tsz;
67583086978SHATAYAMA Daisuke 			len += tsz;
67683086978SHATAYAMA Daisuke 
67783086978SHATAYAMA Daisuke 			if (size == 0)
67883086978SHATAYAMA Daisuke 				return 0;
67983086978SHATAYAMA Daisuke 		}
68083086978SHATAYAMA Daisuke 	}
68183086978SHATAYAMA Daisuke 
68283086978SHATAYAMA Daisuke 	return 0;
68383086978SHATAYAMA Daisuke fail:
684897ab3e0SMike Rapoport 	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
68583086978SHATAYAMA Daisuke 	return -EAGAIN;
68683086978SHATAYAMA Daisuke }
68783086978SHATAYAMA Daisuke #else
mmap_vmcore(struct file * file,struct vm_area_struct * vma)68883086978SHATAYAMA Daisuke static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
68983086978SHATAYAMA Daisuke {
69083086978SHATAYAMA Daisuke 	return -ENOSYS;
69183086978SHATAYAMA Daisuke }
69283086978SHATAYAMA Daisuke #endif
69383086978SHATAYAMA Daisuke 
69497a32539SAlexey Dobriyan static const struct proc_ops vmcore_proc_ops = {
695cc5f2704SDavid Hildenbrand 	.proc_open	= open_vmcore,
6964a22fd20SMatthew Wilcox (Oracle) 	.proc_read_iter	= read_vmcore,
69797a32539SAlexey Dobriyan 	.proc_lseek	= default_llseek,
69897a32539SAlexey Dobriyan 	.proc_mmap	= mmap_vmcore,
699666bfddbSVivek Goyal };
700666bfddbSVivek Goyal 
get_new_element(void)701666bfddbSVivek Goyal static struct vmcore* __init get_new_element(void)
702666bfddbSVivek Goyal {
7032f6d3110SCyrill Gorcunov 	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
704666bfddbSVivek Goyal }
705666bfddbSVivek Goyal 
get_vmcore_size(size_t elfsz,size_t elfnotesegsz,struct list_head * vc_list)70644c752feSRahul Lakkireddy static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
707591ff716SHATAYAMA Daisuke 			   struct list_head *vc_list)
708666bfddbSVivek Goyal {
709666bfddbSVivek Goyal 	u64 size;
710591ff716SHATAYAMA Daisuke 	struct vmcore *m;
711666bfddbSVivek Goyal 
712591ff716SHATAYAMA Daisuke 	size = elfsz + elfnotesegsz;
713591ff716SHATAYAMA Daisuke 	list_for_each_entry(m, vc_list, list) {
714591ff716SHATAYAMA Daisuke 		size += m->size;
71572658e9dSVivek Goyal 	}
71672658e9dSVivek Goyal 	return size;
71772658e9dSVivek Goyal }
71872658e9dSVivek Goyal 
719087350c9SHATAYAMA Daisuke /**
720087350c9SHATAYAMA Daisuke  * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
721087350c9SHATAYAMA Daisuke  *
722087350c9SHATAYAMA Daisuke  * @ehdr_ptr: ELF header
723087350c9SHATAYAMA Daisuke  *
724087350c9SHATAYAMA Daisuke  * This function updates p_memsz member of each PT_NOTE entry in the
725087350c9SHATAYAMA Daisuke  * program header table pointed to by @ehdr_ptr to real size of ELF
726087350c9SHATAYAMA Daisuke  * note segment.
727087350c9SHATAYAMA Daisuke  */
update_note_header_size_elf64(const Elf64_Ehdr * ehdr_ptr)728087350c9SHATAYAMA Daisuke static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
729666bfddbSVivek Goyal {
730087350c9SHATAYAMA Daisuke 	int i, rc=0;
731087350c9SHATAYAMA Daisuke 	Elf64_Phdr *phdr_ptr;
732666bfddbSVivek Goyal 	Elf64_Nhdr *nhdr_ptr;
733666bfddbSVivek Goyal 
734087350c9SHATAYAMA Daisuke 	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
735666bfddbSVivek Goyal 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
736666bfddbSVivek Goyal 		void *notes_section;
737666bfddbSVivek Goyal 		u64 offset, max_sz, sz, real_sz = 0;
738666bfddbSVivek Goyal 		if (phdr_ptr->p_type != PT_NOTE)
739666bfddbSVivek Goyal 			continue;
740666bfddbSVivek Goyal 		max_sz = phdr_ptr->p_memsz;
741666bfddbSVivek Goyal 		offset = phdr_ptr->p_offset;
742666bfddbSVivek Goyal 		notes_section = kmalloc(max_sz, GFP_KERNEL);
743666bfddbSVivek Goyal 		if (!notes_section)
744666bfddbSVivek Goyal 			return -ENOMEM;
745be8a8d06SMichael Holzheu 		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
746666bfddbSVivek Goyal 		if (rc < 0) {
747666bfddbSVivek Goyal 			kfree(notes_section);
748666bfddbSVivek Goyal 			return rc;
749666bfddbSVivek Goyal 		}
750666bfddbSVivek Goyal 		nhdr_ptr = notes_section;
75138dfac84SGreg Pearson 		while (nhdr_ptr->n_namesz != 0) {
752666bfddbSVivek Goyal 			sz = sizeof(Elf64_Nhdr) +
75334b47764SWANG Chao 				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
75434b47764SWANG Chao 				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
75538dfac84SGreg Pearson 			if ((real_sz + sz) > max_sz) {
75638dfac84SGreg Pearson 				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
75738dfac84SGreg Pearson 					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
75838dfac84SGreg Pearson 				break;
75938dfac84SGreg Pearson 			}
760666bfddbSVivek Goyal 			real_sz += sz;
761666bfddbSVivek Goyal 			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
762666bfddbSVivek Goyal 		}
763087350c9SHATAYAMA Daisuke 		kfree(notes_section);
764087350c9SHATAYAMA Daisuke 		phdr_ptr->p_memsz = real_sz;
76538dfac84SGreg Pearson 		if (real_sz == 0) {
76638dfac84SGreg Pearson 			pr_warn("Warning: Zero PT_NOTE entries found\n");
76738dfac84SGreg Pearson 		}
768087350c9SHATAYAMA Daisuke 	}
769666bfddbSVivek Goyal 
770087350c9SHATAYAMA Daisuke 	return 0;
771087350c9SHATAYAMA Daisuke }
772087350c9SHATAYAMA Daisuke 
773087350c9SHATAYAMA Daisuke /**
774087350c9SHATAYAMA Daisuke  * get_note_number_and_size_elf64 - get the number of PT_NOTE program
775087350c9SHATAYAMA Daisuke  * headers and sum of real size of their ELF note segment headers and
776087350c9SHATAYAMA Daisuke  * data.
777087350c9SHATAYAMA Daisuke  *
778087350c9SHATAYAMA Daisuke  * @ehdr_ptr: ELF header
779087350c9SHATAYAMA Daisuke  * @nr_ptnote: buffer for the number of PT_NOTE program headers
780087350c9SHATAYAMA Daisuke  * @sz_ptnote: buffer for size of unique PT_NOTE program header
781087350c9SHATAYAMA Daisuke  *
782087350c9SHATAYAMA Daisuke  * This function is used to merge multiple PT_NOTE program headers
783087350c9SHATAYAMA Daisuke  * into a unique single one. The resulting unique entry will have
784087350c9SHATAYAMA Daisuke  * @sz_ptnote in its phdr->p_mem.
785087350c9SHATAYAMA Daisuke  *
786087350c9SHATAYAMA Daisuke  * It is assumed that program headers with PT_NOTE type pointed to by
787087350c9SHATAYAMA Daisuke  * @ehdr_ptr has already been updated by update_note_header_size_elf64
788087350c9SHATAYAMA Daisuke  * and each of PT_NOTE program headers has actual ELF note segment
789087350c9SHATAYAMA Daisuke  * size in its p_memsz member.
790087350c9SHATAYAMA Daisuke  */
get_note_number_and_size_elf64(const Elf64_Ehdr * ehdr_ptr,int * nr_ptnote,u64 * sz_ptnote)791087350c9SHATAYAMA Daisuke static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
792087350c9SHATAYAMA Daisuke 						 int *nr_ptnote, u64 *sz_ptnote)
793087350c9SHATAYAMA Daisuke {
794087350c9SHATAYAMA Daisuke 	int i;
795087350c9SHATAYAMA Daisuke 	Elf64_Phdr *phdr_ptr;
796087350c9SHATAYAMA Daisuke 
797087350c9SHATAYAMA Daisuke 	*nr_ptnote = *sz_ptnote = 0;
798087350c9SHATAYAMA Daisuke 
799087350c9SHATAYAMA Daisuke 	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
800087350c9SHATAYAMA Daisuke 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
801087350c9SHATAYAMA Daisuke 		if (phdr_ptr->p_type != PT_NOTE)
802087350c9SHATAYAMA Daisuke 			continue;
803087350c9SHATAYAMA Daisuke 		*nr_ptnote += 1;
804087350c9SHATAYAMA Daisuke 		*sz_ptnote += phdr_ptr->p_memsz;
805087350c9SHATAYAMA Daisuke 	}
806087350c9SHATAYAMA Daisuke 
807087350c9SHATAYAMA Daisuke 	return 0;
808087350c9SHATAYAMA Daisuke }
809087350c9SHATAYAMA Daisuke 
810087350c9SHATAYAMA Daisuke /**
811087350c9SHATAYAMA Daisuke  * copy_notes_elf64 - copy ELF note segments in a given buffer
812087350c9SHATAYAMA Daisuke  *
813087350c9SHATAYAMA Daisuke  * @ehdr_ptr: ELF header
814087350c9SHATAYAMA Daisuke  * @notes_buf: buffer into which ELF note segments are copied
815087350c9SHATAYAMA Daisuke  *
816087350c9SHATAYAMA Daisuke  * This function is used to copy ELF note segment in the 1st kernel
817087350c9SHATAYAMA Daisuke  * into the buffer @notes_buf in the 2nd kernel. It is assumed that
818087350c9SHATAYAMA Daisuke  * size of the buffer @notes_buf is equal to or larger than sum of the
819087350c9SHATAYAMA Daisuke  * real ELF note segment headers and data.
820087350c9SHATAYAMA Daisuke  *
821087350c9SHATAYAMA Daisuke  * It is assumed that program headers with PT_NOTE type pointed to by
822087350c9SHATAYAMA Daisuke  * @ehdr_ptr has already been updated by update_note_header_size_elf64
823087350c9SHATAYAMA Daisuke  * and each of PT_NOTE program headers has actual ELF note segment
824087350c9SHATAYAMA Daisuke  * size in its p_memsz member.
825087350c9SHATAYAMA Daisuke  */
copy_notes_elf64(const Elf64_Ehdr * ehdr_ptr,char * notes_buf)826087350c9SHATAYAMA Daisuke static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
827087350c9SHATAYAMA Daisuke {
828087350c9SHATAYAMA Daisuke 	int i, rc=0;
829087350c9SHATAYAMA Daisuke 	Elf64_Phdr *phdr_ptr;
830087350c9SHATAYAMA Daisuke 
831087350c9SHATAYAMA Daisuke 	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
832087350c9SHATAYAMA Daisuke 
833087350c9SHATAYAMA Daisuke 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
834087350c9SHATAYAMA Daisuke 		u64 offset;
835087350c9SHATAYAMA Daisuke 		if (phdr_ptr->p_type != PT_NOTE)
836087350c9SHATAYAMA Daisuke 			continue;
837087350c9SHATAYAMA Daisuke 		offset = phdr_ptr->p_offset;
838be8a8d06SMichael Holzheu 		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
839be8a8d06SMichael Holzheu 					   &offset);
840087350c9SHATAYAMA Daisuke 		if (rc < 0)
841087350c9SHATAYAMA Daisuke 			return rc;
842087350c9SHATAYAMA Daisuke 		notes_buf += phdr_ptr->p_memsz;
843087350c9SHATAYAMA Daisuke 	}
844087350c9SHATAYAMA Daisuke 
845087350c9SHATAYAMA Daisuke 	return 0;
846087350c9SHATAYAMA Daisuke }
847087350c9SHATAYAMA Daisuke 
848087350c9SHATAYAMA Daisuke /* Merges all the PT_NOTE headers into one. */
merge_note_headers_elf64(char * elfptr,size_t * elfsz,char ** notes_buf,size_t * notes_sz)849087350c9SHATAYAMA Daisuke static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
850087350c9SHATAYAMA Daisuke 					   char **notes_buf, size_t *notes_sz)
851087350c9SHATAYAMA Daisuke {
852087350c9SHATAYAMA Daisuke 	int i, nr_ptnote=0, rc=0;
853087350c9SHATAYAMA Daisuke 	char *tmp;
854087350c9SHATAYAMA Daisuke 	Elf64_Ehdr *ehdr_ptr;
855087350c9SHATAYAMA Daisuke 	Elf64_Phdr phdr;
856087350c9SHATAYAMA Daisuke 	u64 phdr_sz = 0, note_off;
857087350c9SHATAYAMA Daisuke 
858087350c9SHATAYAMA Daisuke 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
859087350c9SHATAYAMA Daisuke 
860087350c9SHATAYAMA Daisuke 	rc = update_note_header_size_elf64(ehdr_ptr);
861087350c9SHATAYAMA Daisuke 	if (rc < 0)
862087350c9SHATAYAMA Daisuke 		return rc;
863087350c9SHATAYAMA Daisuke 
864087350c9SHATAYAMA Daisuke 	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
865087350c9SHATAYAMA Daisuke 	if (rc < 0)
866087350c9SHATAYAMA Daisuke 		return rc;
867087350c9SHATAYAMA Daisuke 
868087350c9SHATAYAMA Daisuke 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
8692724273eSRahul Lakkireddy 	*notes_buf = vmcore_alloc_buf(*notes_sz);
870087350c9SHATAYAMA Daisuke 	if (!*notes_buf)
871666bfddbSVivek Goyal 		return -ENOMEM;
872087350c9SHATAYAMA Daisuke 
873087350c9SHATAYAMA Daisuke 	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
874087350c9SHATAYAMA Daisuke 	if (rc < 0)
875087350c9SHATAYAMA Daisuke 		return rc;
876666bfddbSVivek Goyal 
877666bfddbSVivek Goyal 	/* Prepare merged PT_NOTE program header. */
878666bfddbSVivek Goyal 	phdr.p_type    = PT_NOTE;
879666bfddbSVivek Goyal 	phdr.p_flags   = 0;
880666bfddbSVivek Goyal 	note_off = sizeof(Elf64_Ehdr) +
881666bfddbSVivek Goyal 			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
882087350c9SHATAYAMA Daisuke 	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
883666bfddbSVivek Goyal 	phdr.p_vaddr   = phdr.p_paddr = 0;
884666bfddbSVivek Goyal 	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
88560592fb6SFangrui Song 	phdr.p_align   = 4;
886666bfddbSVivek Goyal 
887666bfddbSVivek Goyal 	/* Add merged PT_NOTE program header*/
888666bfddbSVivek Goyal 	tmp = elfptr + sizeof(Elf64_Ehdr);
889666bfddbSVivek Goyal 	memcpy(tmp, &phdr, sizeof(phdr));
890666bfddbSVivek Goyal 	tmp += sizeof(phdr);
891666bfddbSVivek Goyal 
892666bfddbSVivek Goyal 	/* Remove unwanted PT_NOTE program headers. */
893666bfddbSVivek Goyal 	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
894666bfddbSVivek Goyal 	*elfsz = *elfsz - i;
895666bfddbSVivek Goyal 	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
896f2bdacddSHATAYAMA Daisuke 	memset(elfptr + *elfsz, 0, i);
897f2bdacddSHATAYAMA Daisuke 	*elfsz = roundup(*elfsz, PAGE_SIZE);
898666bfddbSVivek Goyal 
899666bfddbSVivek Goyal 	/* Modify e_phnum to reflect merged headers. */
900666bfddbSVivek Goyal 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
901666bfddbSVivek Goyal 
9027efe48dfSRahul Lakkireddy 	/* Store the size of all notes.  We need this to update the note
9037efe48dfSRahul Lakkireddy 	 * header when the device dumps will be added.
9047efe48dfSRahul Lakkireddy 	 */
9057efe48dfSRahul Lakkireddy 	elfnotes_orig_sz = phdr.p_memsz;
9067efe48dfSRahul Lakkireddy 
907666bfddbSVivek Goyal 	return 0;
908666bfddbSVivek Goyal }
909666bfddbSVivek Goyal 
910087350c9SHATAYAMA Daisuke /**
911087350c9SHATAYAMA Daisuke  * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
912087350c9SHATAYAMA Daisuke  *
913087350c9SHATAYAMA Daisuke  * @ehdr_ptr: ELF header
914087350c9SHATAYAMA Daisuke  *
915087350c9SHATAYAMA Daisuke  * This function updates p_memsz member of each PT_NOTE entry in the
916087350c9SHATAYAMA Daisuke  * program header table pointed to by @ehdr_ptr to real size of ELF
917087350c9SHATAYAMA Daisuke  * note segment.
918087350c9SHATAYAMA Daisuke  */
update_note_header_size_elf32(const Elf32_Ehdr * ehdr_ptr)919087350c9SHATAYAMA Daisuke static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
92072658e9dSVivek Goyal {
921087350c9SHATAYAMA Daisuke 	int i, rc=0;
922087350c9SHATAYAMA Daisuke 	Elf32_Phdr *phdr_ptr;
92372658e9dSVivek Goyal 	Elf32_Nhdr *nhdr_ptr;
92472658e9dSVivek Goyal 
925087350c9SHATAYAMA Daisuke 	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
92672658e9dSVivek Goyal 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
92772658e9dSVivek Goyal 		void *notes_section;
92872658e9dSVivek Goyal 		u64 offset, max_sz, sz, real_sz = 0;
92972658e9dSVivek Goyal 		if (phdr_ptr->p_type != PT_NOTE)
93072658e9dSVivek Goyal 			continue;
93172658e9dSVivek Goyal 		max_sz = phdr_ptr->p_memsz;
93272658e9dSVivek Goyal 		offset = phdr_ptr->p_offset;
93372658e9dSVivek Goyal 		notes_section = kmalloc(max_sz, GFP_KERNEL);
93472658e9dSVivek Goyal 		if (!notes_section)
93572658e9dSVivek Goyal 			return -ENOMEM;
936be8a8d06SMichael Holzheu 		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
93772658e9dSVivek Goyal 		if (rc < 0) {
93872658e9dSVivek Goyal 			kfree(notes_section);
93972658e9dSVivek Goyal 			return rc;
94072658e9dSVivek Goyal 		}
94172658e9dSVivek Goyal 		nhdr_ptr = notes_section;
94238dfac84SGreg Pearson 		while (nhdr_ptr->n_namesz != 0) {
94372658e9dSVivek Goyal 			sz = sizeof(Elf32_Nhdr) +
94434b47764SWANG Chao 				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
94534b47764SWANG Chao 				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
94638dfac84SGreg Pearson 			if ((real_sz + sz) > max_sz) {
94738dfac84SGreg Pearson 				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
94838dfac84SGreg Pearson 					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
94938dfac84SGreg Pearson 				break;
95038dfac84SGreg Pearson 			}
95172658e9dSVivek Goyal 			real_sz += sz;
95272658e9dSVivek Goyal 			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
95372658e9dSVivek Goyal 		}
954087350c9SHATAYAMA Daisuke 		kfree(notes_section);
955087350c9SHATAYAMA Daisuke 		phdr_ptr->p_memsz = real_sz;
95638dfac84SGreg Pearson 		if (real_sz == 0) {
95738dfac84SGreg Pearson 			pr_warn("Warning: Zero PT_NOTE entries found\n");
95838dfac84SGreg Pearson 		}
959087350c9SHATAYAMA Daisuke 	}
96072658e9dSVivek Goyal 
961087350c9SHATAYAMA Daisuke 	return 0;
962087350c9SHATAYAMA Daisuke }
963087350c9SHATAYAMA Daisuke 
964087350c9SHATAYAMA Daisuke /**
965087350c9SHATAYAMA Daisuke  * get_note_number_and_size_elf32 - get the number of PT_NOTE program
966087350c9SHATAYAMA Daisuke  * headers and sum of real size of their ELF note segment headers and
967087350c9SHATAYAMA Daisuke  * data.
968087350c9SHATAYAMA Daisuke  *
969087350c9SHATAYAMA Daisuke  * @ehdr_ptr: ELF header
970087350c9SHATAYAMA Daisuke  * @nr_ptnote: buffer for the number of PT_NOTE program headers
971087350c9SHATAYAMA Daisuke  * @sz_ptnote: buffer for size of unique PT_NOTE program header
972087350c9SHATAYAMA Daisuke  *
973087350c9SHATAYAMA Daisuke  * This function is used to merge multiple PT_NOTE program headers
974087350c9SHATAYAMA Daisuke  * into a unique single one. The resulting unique entry will have
975087350c9SHATAYAMA Daisuke  * @sz_ptnote in its phdr->p_mem.
976087350c9SHATAYAMA Daisuke  *
977087350c9SHATAYAMA Daisuke  * It is assumed that program headers with PT_NOTE type pointed to by
978087350c9SHATAYAMA Daisuke  * @ehdr_ptr has already been updated by update_note_header_size_elf32
979087350c9SHATAYAMA Daisuke  * and each of PT_NOTE program headers has actual ELF note segment
980087350c9SHATAYAMA Daisuke  * size in its p_memsz member.
981087350c9SHATAYAMA Daisuke  */
get_note_number_and_size_elf32(const Elf32_Ehdr * ehdr_ptr,int * nr_ptnote,u64 * sz_ptnote)982087350c9SHATAYAMA Daisuke static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
983087350c9SHATAYAMA Daisuke 						 int *nr_ptnote, u64 *sz_ptnote)
984087350c9SHATAYAMA Daisuke {
985087350c9SHATAYAMA Daisuke 	int i;
986087350c9SHATAYAMA Daisuke 	Elf32_Phdr *phdr_ptr;
987087350c9SHATAYAMA Daisuke 
988087350c9SHATAYAMA Daisuke 	*nr_ptnote = *sz_ptnote = 0;
989087350c9SHATAYAMA Daisuke 
990087350c9SHATAYAMA Daisuke 	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
991087350c9SHATAYAMA Daisuke 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
992087350c9SHATAYAMA Daisuke 		if (phdr_ptr->p_type != PT_NOTE)
993087350c9SHATAYAMA Daisuke 			continue;
994087350c9SHATAYAMA Daisuke 		*nr_ptnote += 1;
995087350c9SHATAYAMA Daisuke 		*sz_ptnote += phdr_ptr->p_memsz;
996087350c9SHATAYAMA Daisuke 	}
997087350c9SHATAYAMA Daisuke 
998087350c9SHATAYAMA Daisuke 	return 0;
999087350c9SHATAYAMA Daisuke }
1000087350c9SHATAYAMA Daisuke 
1001087350c9SHATAYAMA Daisuke /**
1002087350c9SHATAYAMA Daisuke  * copy_notes_elf32 - copy ELF note segments in a given buffer
1003087350c9SHATAYAMA Daisuke  *
1004087350c9SHATAYAMA Daisuke  * @ehdr_ptr: ELF header
1005087350c9SHATAYAMA Daisuke  * @notes_buf: buffer into which ELF note segments are copied
1006087350c9SHATAYAMA Daisuke  *
1007087350c9SHATAYAMA Daisuke  * This function is used to copy ELF note segment in the 1st kernel
1008087350c9SHATAYAMA Daisuke  * into the buffer @notes_buf in the 2nd kernel. It is assumed that
1009087350c9SHATAYAMA Daisuke  * size of the buffer @notes_buf is equal to or larger than sum of the
1010087350c9SHATAYAMA Daisuke  * real ELF note segment headers and data.
1011087350c9SHATAYAMA Daisuke  *
1012087350c9SHATAYAMA Daisuke  * It is assumed that program headers with PT_NOTE type pointed to by
1013087350c9SHATAYAMA Daisuke  * @ehdr_ptr has already been updated by update_note_header_size_elf32
1014087350c9SHATAYAMA Daisuke  * and each of PT_NOTE program headers has actual ELF note segment
1015087350c9SHATAYAMA Daisuke  * size in its p_memsz member.
1016087350c9SHATAYAMA Daisuke  */
copy_notes_elf32(const Elf32_Ehdr * ehdr_ptr,char * notes_buf)1017087350c9SHATAYAMA Daisuke static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
1018087350c9SHATAYAMA Daisuke {
1019087350c9SHATAYAMA Daisuke 	int i, rc=0;
1020087350c9SHATAYAMA Daisuke 	Elf32_Phdr *phdr_ptr;
1021087350c9SHATAYAMA Daisuke 
1022087350c9SHATAYAMA Daisuke 	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
1023087350c9SHATAYAMA Daisuke 
1024087350c9SHATAYAMA Daisuke 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1025087350c9SHATAYAMA Daisuke 		u64 offset;
1026087350c9SHATAYAMA Daisuke 		if (phdr_ptr->p_type != PT_NOTE)
1027087350c9SHATAYAMA Daisuke 			continue;
1028087350c9SHATAYAMA Daisuke 		offset = phdr_ptr->p_offset;
1029be8a8d06SMichael Holzheu 		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1030be8a8d06SMichael Holzheu 					   &offset);
1031087350c9SHATAYAMA Daisuke 		if (rc < 0)
1032087350c9SHATAYAMA Daisuke 			return rc;
1033087350c9SHATAYAMA Daisuke 		notes_buf += phdr_ptr->p_memsz;
1034087350c9SHATAYAMA Daisuke 	}
1035087350c9SHATAYAMA Daisuke 
1036087350c9SHATAYAMA Daisuke 	return 0;
1037087350c9SHATAYAMA Daisuke }
1038087350c9SHATAYAMA Daisuke 
1039087350c9SHATAYAMA Daisuke /* Merges all the PT_NOTE headers into one. */
merge_note_headers_elf32(char * elfptr,size_t * elfsz,char ** notes_buf,size_t * notes_sz)1040087350c9SHATAYAMA Daisuke static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1041087350c9SHATAYAMA Daisuke 					   char **notes_buf, size_t *notes_sz)
1042087350c9SHATAYAMA Daisuke {
1043087350c9SHATAYAMA Daisuke 	int i, nr_ptnote=0, rc=0;
1044087350c9SHATAYAMA Daisuke 	char *tmp;
1045087350c9SHATAYAMA Daisuke 	Elf32_Ehdr *ehdr_ptr;
1046087350c9SHATAYAMA Daisuke 	Elf32_Phdr phdr;
1047087350c9SHATAYAMA Daisuke 	u64 phdr_sz = 0, note_off;
1048087350c9SHATAYAMA Daisuke 
1049087350c9SHATAYAMA Daisuke 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1050087350c9SHATAYAMA Daisuke 
1051087350c9SHATAYAMA Daisuke 	rc = update_note_header_size_elf32(ehdr_ptr);
1052087350c9SHATAYAMA Daisuke 	if (rc < 0)
1053087350c9SHATAYAMA Daisuke 		return rc;
1054087350c9SHATAYAMA Daisuke 
1055087350c9SHATAYAMA Daisuke 	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1056087350c9SHATAYAMA Daisuke 	if (rc < 0)
1057087350c9SHATAYAMA Daisuke 		return rc;
1058087350c9SHATAYAMA Daisuke 
1059087350c9SHATAYAMA Daisuke 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
10602724273eSRahul Lakkireddy 	*notes_buf = vmcore_alloc_buf(*notes_sz);
1061087350c9SHATAYAMA Daisuke 	if (!*notes_buf)
106272658e9dSVivek Goyal 		return -ENOMEM;
1063087350c9SHATAYAMA Daisuke 
1064087350c9SHATAYAMA Daisuke 	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1065087350c9SHATAYAMA Daisuke 	if (rc < 0)
1066087350c9SHATAYAMA Daisuke 		return rc;
106772658e9dSVivek Goyal 
106872658e9dSVivek Goyal 	/* Prepare merged PT_NOTE program header. */
106972658e9dSVivek Goyal 	phdr.p_type    = PT_NOTE;
107072658e9dSVivek Goyal 	phdr.p_flags   = 0;
107172658e9dSVivek Goyal 	note_off = sizeof(Elf32_Ehdr) +
107272658e9dSVivek Goyal 			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
1073087350c9SHATAYAMA Daisuke 	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
107472658e9dSVivek Goyal 	phdr.p_vaddr   = phdr.p_paddr = 0;
107572658e9dSVivek Goyal 	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
107660592fb6SFangrui Song 	phdr.p_align   = 4;
107772658e9dSVivek Goyal 
107872658e9dSVivek Goyal 	/* Add merged PT_NOTE program header*/
107972658e9dSVivek Goyal 	tmp = elfptr + sizeof(Elf32_Ehdr);
108072658e9dSVivek Goyal 	memcpy(tmp, &phdr, sizeof(phdr));
108172658e9dSVivek Goyal 	tmp += sizeof(phdr);
108272658e9dSVivek Goyal 
108372658e9dSVivek Goyal 	/* Remove unwanted PT_NOTE program headers. */
108472658e9dSVivek Goyal 	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
108572658e9dSVivek Goyal 	*elfsz = *elfsz - i;
108672658e9dSVivek Goyal 	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
1087f2bdacddSHATAYAMA Daisuke 	memset(elfptr + *elfsz, 0, i);
1088f2bdacddSHATAYAMA Daisuke 	*elfsz = roundup(*elfsz, PAGE_SIZE);
108972658e9dSVivek Goyal 
109072658e9dSVivek Goyal 	/* Modify e_phnum to reflect merged headers. */
109172658e9dSVivek Goyal 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
109272658e9dSVivek Goyal 
10937efe48dfSRahul Lakkireddy 	/* Store the size of all notes.  We need this to update the note
10947efe48dfSRahul Lakkireddy 	 * header when the device dumps will be added.
10957efe48dfSRahul Lakkireddy 	 */
10967efe48dfSRahul Lakkireddy 	elfnotes_orig_sz = phdr.p_memsz;
10977efe48dfSRahul Lakkireddy 
109872658e9dSVivek Goyal 	return 0;
109972658e9dSVivek Goyal }
110072658e9dSVivek Goyal 
1101666bfddbSVivek Goyal /* Add memory chunks represented by program headers to vmcore list. Also update
1102666bfddbSVivek Goyal  * the new offset fields of exported program headers. */
process_ptload_program_headers_elf64(char * elfptr,size_t elfsz,size_t elfnotes_sz,struct list_head * vc_list)1103666bfddbSVivek Goyal static int __init process_ptload_program_headers_elf64(char *elfptr,
1104666bfddbSVivek Goyal 						size_t elfsz,
1105087350c9SHATAYAMA Daisuke 						size_t elfnotes_sz,
1106666bfddbSVivek Goyal 						struct list_head *vc_list)
1107666bfddbSVivek Goyal {
1108666bfddbSVivek Goyal 	int i;
1109666bfddbSVivek Goyal 	Elf64_Ehdr *ehdr_ptr;
1110666bfddbSVivek Goyal 	Elf64_Phdr *phdr_ptr;
1111666bfddbSVivek Goyal 	loff_t vmcore_off;
1112666bfddbSVivek Goyal 	struct vmcore *new;
1113666bfddbSVivek Goyal 
1114666bfddbSVivek Goyal 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
1115666bfddbSVivek Goyal 	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1116666bfddbSVivek Goyal 
111770e79866SAlexey Dobriyan 	/* Skip ELF header, program headers and ELF note segment. */
1118087350c9SHATAYAMA Daisuke 	vmcore_off = elfsz + elfnotes_sz;
1119666bfddbSVivek Goyal 
1120666bfddbSVivek Goyal 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
11217f614cd1SHATAYAMA Daisuke 		u64 paddr, start, end, size;
11227f614cd1SHATAYAMA Daisuke 
1123666bfddbSVivek Goyal 		if (phdr_ptr->p_type != PT_LOAD)
1124666bfddbSVivek Goyal 			continue;
1125666bfddbSVivek Goyal 
11267f614cd1SHATAYAMA Daisuke 		paddr = phdr_ptr->p_offset;
11277f614cd1SHATAYAMA Daisuke 		start = rounddown(paddr, PAGE_SIZE);
11287f614cd1SHATAYAMA Daisuke 		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
11297f614cd1SHATAYAMA Daisuke 		size = end - start;
11307f614cd1SHATAYAMA Daisuke 
1131666bfddbSVivek Goyal 		/* Add this contiguous chunk of memory to vmcore list.*/
1132666bfddbSVivek Goyal 		new = get_new_element();
1133666bfddbSVivek Goyal 		if (!new)
1134666bfddbSVivek Goyal 			return -ENOMEM;
11357f614cd1SHATAYAMA Daisuke 		new->paddr = start;
11367f614cd1SHATAYAMA Daisuke 		new->size = size;
1137666bfddbSVivek Goyal 		list_add_tail(&new->list, vc_list);
1138666bfddbSVivek Goyal 
1139666bfddbSVivek Goyal 		/* Update the program header offset. */
11407f614cd1SHATAYAMA Daisuke 		phdr_ptr->p_offset = vmcore_off + (paddr - start);
11417f614cd1SHATAYAMA Daisuke 		vmcore_off = vmcore_off + size;
1142666bfddbSVivek Goyal 	}
1143666bfddbSVivek Goyal 	return 0;
1144666bfddbSVivek Goyal }
1145666bfddbSVivek Goyal 
process_ptload_program_headers_elf32(char * elfptr,size_t elfsz,size_t elfnotes_sz,struct list_head * vc_list)114672658e9dSVivek Goyal static int __init process_ptload_program_headers_elf32(char *elfptr,
114772658e9dSVivek Goyal 						size_t elfsz,
1148087350c9SHATAYAMA Daisuke 						size_t elfnotes_sz,
114972658e9dSVivek Goyal 						struct list_head *vc_list)
115072658e9dSVivek Goyal {
115172658e9dSVivek Goyal 	int i;
115272658e9dSVivek Goyal 	Elf32_Ehdr *ehdr_ptr;
115372658e9dSVivek Goyal 	Elf32_Phdr *phdr_ptr;
115472658e9dSVivek Goyal 	loff_t vmcore_off;
115572658e9dSVivek Goyal 	struct vmcore *new;
115672658e9dSVivek Goyal 
115772658e9dSVivek Goyal 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
115872658e9dSVivek Goyal 	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
115972658e9dSVivek Goyal 
116070e79866SAlexey Dobriyan 	/* Skip ELF header, program headers and ELF note segment. */
1161087350c9SHATAYAMA Daisuke 	vmcore_off = elfsz + elfnotes_sz;
116272658e9dSVivek Goyal 
116372658e9dSVivek Goyal 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
11647f614cd1SHATAYAMA Daisuke 		u64 paddr, start, end, size;
11657f614cd1SHATAYAMA Daisuke 
116672658e9dSVivek Goyal 		if (phdr_ptr->p_type != PT_LOAD)
116772658e9dSVivek Goyal 			continue;
116872658e9dSVivek Goyal 
11697f614cd1SHATAYAMA Daisuke 		paddr = phdr_ptr->p_offset;
11707f614cd1SHATAYAMA Daisuke 		start = rounddown(paddr, PAGE_SIZE);
11717f614cd1SHATAYAMA Daisuke 		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
11727f614cd1SHATAYAMA Daisuke 		size = end - start;
11737f614cd1SHATAYAMA Daisuke 
117472658e9dSVivek Goyal 		/* Add this contiguous chunk of memory to vmcore list.*/
117572658e9dSVivek Goyal 		new = get_new_element();
117672658e9dSVivek Goyal 		if (!new)
117772658e9dSVivek Goyal 			return -ENOMEM;
11787f614cd1SHATAYAMA Daisuke 		new->paddr = start;
11797f614cd1SHATAYAMA Daisuke 		new->size = size;
118072658e9dSVivek Goyal 		list_add_tail(&new->list, vc_list);
118172658e9dSVivek Goyal 
118272658e9dSVivek Goyal 		/* Update the program header offset */
11837f614cd1SHATAYAMA Daisuke 		phdr_ptr->p_offset = vmcore_off + (paddr - start);
11847f614cd1SHATAYAMA Daisuke 		vmcore_off = vmcore_off + size;
118572658e9dSVivek Goyal 	}
118672658e9dSVivek Goyal 	return 0;
118772658e9dSVivek Goyal }
118872658e9dSVivek Goyal 
1189666bfddbSVivek Goyal /* Sets offset fields of vmcore elements. */
set_vmcore_list_offsets(size_t elfsz,size_t elfnotes_sz,struct list_head * vc_list)11907efe48dfSRahul Lakkireddy static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1191666bfddbSVivek Goyal 				    struct list_head *vc_list)
1192666bfddbSVivek Goyal {
1193666bfddbSVivek Goyal 	loff_t vmcore_off;
1194666bfddbSVivek Goyal 	struct vmcore *m;
1195666bfddbSVivek Goyal 
119670e79866SAlexey Dobriyan 	/* Skip ELF header, program headers and ELF note segment. */
1197087350c9SHATAYAMA Daisuke 	vmcore_off = elfsz + elfnotes_sz;
1198666bfddbSVivek Goyal 
1199666bfddbSVivek Goyal 	list_for_each_entry(m, vc_list, list) {
1200666bfddbSVivek Goyal 		m->offset = vmcore_off;
1201666bfddbSVivek Goyal 		vmcore_off += m->size;
1202666bfddbSVivek Goyal 	}
1203666bfddbSVivek Goyal }
1204666bfddbSVivek Goyal 
free_elfcorebuf(void)1205f2bdacddSHATAYAMA Daisuke static void free_elfcorebuf(void)
120672658e9dSVivek Goyal {
1207f2bdacddSHATAYAMA Daisuke 	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1208f2bdacddSHATAYAMA Daisuke 	elfcorebuf = NULL;
1209087350c9SHATAYAMA Daisuke 	vfree(elfnotes_buf);
1210087350c9SHATAYAMA Daisuke 	elfnotes_buf = NULL;
121172658e9dSVivek Goyal }
121272658e9dSVivek Goyal 
parse_crash_elf64_headers(void)1213666bfddbSVivek Goyal static int __init parse_crash_elf64_headers(void)
1214666bfddbSVivek Goyal {
1215666bfddbSVivek Goyal 	int rc=0;
1216666bfddbSVivek Goyal 	Elf64_Ehdr ehdr;
1217666bfddbSVivek Goyal 	u64 addr;
1218666bfddbSVivek Goyal 
1219666bfddbSVivek Goyal 	addr = elfcorehdr_addr;
1220666bfddbSVivek Goyal 
122170e79866SAlexey Dobriyan 	/* Read ELF header */
1222be8a8d06SMichael Holzheu 	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1223666bfddbSVivek Goyal 	if (rc < 0)
1224666bfddbSVivek Goyal 		return rc;
1225666bfddbSVivek Goyal 
1226666bfddbSVivek Goyal 	/* Do some basic Verification. */
1227666bfddbSVivek Goyal 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1228666bfddbSVivek Goyal 		(ehdr.e_type != ET_CORE) ||
12299833c394SMika Westerberg 		!vmcore_elf64_check_arch(&ehdr) ||
1230666bfddbSVivek Goyal 		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1231666bfddbSVivek Goyal 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1232666bfddbSVivek Goyal 		ehdr.e_version != EV_CURRENT ||
1233666bfddbSVivek Goyal 		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1234666bfddbSVivek Goyal 		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1235666bfddbSVivek Goyal 		ehdr.e_phnum == 0) {
123687ebdc00SAndrew Morton 		pr_warn("Warning: Core image elf header is not sane\n");
1237666bfddbSVivek Goyal 		return -EINVAL;
1238666bfddbSVivek Goyal 	}
1239666bfddbSVivek Goyal 
1240666bfddbSVivek Goyal 	/* Read in all elf headers. */
1241f2bdacddSHATAYAMA Daisuke 	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1242f2bdacddSHATAYAMA Daisuke 				ehdr.e_phnum * sizeof(Elf64_Phdr);
1243f2bdacddSHATAYAMA Daisuke 	elfcorebuf_sz = elfcorebuf_sz_orig;
1244f2bdacddSHATAYAMA Daisuke 	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1245f2bdacddSHATAYAMA Daisuke 					      get_order(elfcorebuf_sz_orig));
1246666bfddbSVivek Goyal 	if (!elfcorebuf)
1247666bfddbSVivek Goyal 		return -ENOMEM;
1248666bfddbSVivek Goyal 	addr = elfcorehdr_addr;
1249be8a8d06SMichael Holzheu 	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1250f2bdacddSHATAYAMA Daisuke 	if (rc < 0)
1251f2bdacddSHATAYAMA Daisuke 		goto fail;
1252666bfddbSVivek Goyal 
1253666bfddbSVivek Goyal 	/* Merge all PT_NOTE headers into one. */
1254087350c9SHATAYAMA Daisuke 	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1255087350c9SHATAYAMA Daisuke 				      &elfnotes_buf, &elfnotes_sz);
1256f2bdacddSHATAYAMA Daisuke 	if (rc)
1257f2bdacddSHATAYAMA Daisuke 		goto fail;
1258666bfddbSVivek Goyal 	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1259087350c9SHATAYAMA Daisuke 						  elfnotes_sz, &vmcore_list);
1260f2bdacddSHATAYAMA Daisuke 	if (rc)
1261f2bdacddSHATAYAMA Daisuke 		goto fail;
1262087350c9SHATAYAMA Daisuke 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1263666bfddbSVivek Goyal 	return 0;
1264f2bdacddSHATAYAMA Daisuke fail:
1265f2bdacddSHATAYAMA Daisuke 	free_elfcorebuf();
1266f2bdacddSHATAYAMA Daisuke 	return rc;
1267666bfddbSVivek Goyal }
1268666bfddbSVivek Goyal 
parse_crash_elf32_headers(void)126972658e9dSVivek Goyal static int __init parse_crash_elf32_headers(void)
127072658e9dSVivek Goyal {
127172658e9dSVivek Goyal 	int rc=0;
127272658e9dSVivek Goyal 	Elf32_Ehdr ehdr;
127372658e9dSVivek Goyal 	u64 addr;
127472658e9dSVivek Goyal 
127572658e9dSVivek Goyal 	addr = elfcorehdr_addr;
127672658e9dSVivek Goyal 
127770e79866SAlexey Dobriyan 	/* Read ELF header */
1278be8a8d06SMichael Holzheu 	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
127972658e9dSVivek Goyal 	if (rc < 0)
128072658e9dSVivek Goyal 		return rc;
128172658e9dSVivek Goyal 
128272658e9dSVivek Goyal 	/* Do some basic Verification. */
128372658e9dSVivek Goyal 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
128472658e9dSVivek Goyal 		(ehdr.e_type != ET_CORE) ||
1285e55d5312SDaniel Wagner 		!vmcore_elf32_check_arch(&ehdr) ||
128672658e9dSVivek Goyal 		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
128772658e9dSVivek Goyal 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
128872658e9dSVivek Goyal 		ehdr.e_version != EV_CURRENT ||
128972658e9dSVivek Goyal 		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
129072658e9dSVivek Goyal 		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
129172658e9dSVivek Goyal 		ehdr.e_phnum == 0) {
129287ebdc00SAndrew Morton 		pr_warn("Warning: Core image elf header is not sane\n");
129372658e9dSVivek Goyal 		return -EINVAL;
129472658e9dSVivek Goyal 	}
129572658e9dSVivek Goyal 
129672658e9dSVivek Goyal 	/* Read in all elf headers. */
1297f2bdacddSHATAYAMA Daisuke 	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1298f2bdacddSHATAYAMA Daisuke 	elfcorebuf_sz = elfcorebuf_sz_orig;
1299f2bdacddSHATAYAMA Daisuke 	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1300f2bdacddSHATAYAMA Daisuke 					      get_order(elfcorebuf_sz_orig));
130172658e9dSVivek Goyal 	if (!elfcorebuf)
130272658e9dSVivek Goyal 		return -ENOMEM;
130372658e9dSVivek Goyal 	addr = elfcorehdr_addr;
1304be8a8d06SMichael Holzheu 	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1305f2bdacddSHATAYAMA Daisuke 	if (rc < 0)
1306f2bdacddSHATAYAMA Daisuke 		goto fail;
130772658e9dSVivek Goyal 
130872658e9dSVivek Goyal 	/* Merge all PT_NOTE headers into one. */
1309087350c9SHATAYAMA Daisuke 	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1310087350c9SHATAYAMA Daisuke 				      &elfnotes_buf, &elfnotes_sz);
1311f2bdacddSHATAYAMA Daisuke 	if (rc)
1312f2bdacddSHATAYAMA Daisuke 		goto fail;
131372658e9dSVivek Goyal 	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1314087350c9SHATAYAMA Daisuke 						  elfnotes_sz, &vmcore_list);
1315f2bdacddSHATAYAMA Daisuke 	if (rc)
1316f2bdacddSHATAYAMA Daisuke 		goto fail;
1317087350c9SHATAYAMA Daisuke 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
131872658e9dSVivek Goyal 	return 0;
1319f2bdacddSHATAYAMA Daisuke fail:
1320f2bdacddSHATAYAMA Daisuke 	free_elfcorebuf();
1321f2bdacddSHATAYAMA Daisuke 	return rc;
132272658e9dSVivek Goyal }
132372658e9dSVivek Goyal 
parse_crash_elf_headers(void)1324666bfddbSVivek Goyal static int __init parse_crash_elf_headers(void)
1325666bfddbSVivek Goyal {
1326666bfddbSVivek Goyal 	unsigned char e_ident[EI_NIDENT];
1327666bfddbSVivek Goyal 	u64 addr;
1328666bfddbSVivek Goyal 	int rc=0;
1329666bfddbSVivek Goyal 
1330666bfddbSVivek Goyal 	addr = elfcorehdr_addr;
1331be8a8d06SMichael Holzheu 	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1332666bfddbSVivek Goyal 	if (rc < 0)
1333666bfddbSVivek Goyal 		return rc;
1334666bfddbSVivek Goyal 	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
133587ebdc00SAndrew Morton 		pr_warn("Warning: Core image elf header not found\n");
1336666bfddbSVivek Goyal 		return -EINVAL;
1337666bfddbSVivek Goyal 	}
1338666bfddbSVivek Goyal 
1339666bfddbSVivek Goyal 	if (e_ident[EI_CLASS] == ELFCLASS64) {
1340666bfddbSVivek Goyal 		rc = parse_crash_elf64_headers();
1341666bfddbSVivek Goyal 		if (rc)
1342666bfddbSVivek Goyal 			return rc;
134372658e9dSVivek Goyal 	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
134472658e9dSVivek Goyal 		rc = parse_crash_elf32_headers();
134572658e9dSVivek Goyal 		if (rc)
134672658e9dSVivek Goyal 			return rc;
1347666bfddbSVivek Goyal 	} else {
134887ebdc00SAndrew Morton 		pr_warn("Warning: Core image elf header is not sane\n");
1349666bfddbSVivek Goyal 		return -EINVAL;
1350666bfddbSVivek Goyal 	}
1351591ff716SHATAYAMA Daisuke 
1352591ff716SHATAYAMA Daisuke 	/* Determine vmcore size. */
1353591ff716SHATAYAMA Daisuke 	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1354591ff716SHATAYAMA Daisuke 				      &vmcore_list);
1355591ff716SHATAYAMA Daisuke 
1356666bfddbSVivek Goyal 	return 0;
1357666bfddbSVivek Goyal }
1358666bfddbSVivek Goyal 
13592724273eSRahul Lakkireddy #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
13602724273eSRahul Lakkireddy /**
13612724273eSRahul Lakkireddy  * vmcoredd_write_header - Write vmcore device dump header at the
13622724273eSRahul Lakkireddy  * beginning of the dump's buffer.
13632724273eSRahul Lakkireddy  * @buf: Output buffer where the note is written
13642724273eSRahul Lakkireddy  * @data: Dump info
13652724273eSRahul Lakkireddy  * @size: Size of the dump
13662724273eSRahul Lakkireddy  *
13672724273eSRahul Lakkireddy  * Fills beginning of the dump's buffer with vmcore device dump header.
13682724273eSRahul Lakkireddy  */
vmcoredd_write_header(void * buf,struct vmcoredd_data * data,u32 size)13692724273eSRahul Lakkireddy static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
13702724273eSRahul Lakkireddy 				  u32 size)
13712724273eSRahul Lakkireddy {
13722724273eSRahul Lakkireddy 	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
13732724273eSRahul Lakkireddy 
13742724273eSRahul Lakkireddy 	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
13752724273eSRahul Lakkireddy 	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
13762724273eSRahul Lakkireddy 	vdd_hdr->n_type = NT_VMCOREDD;
13772724273eSRahul Lakkireddy 
13782724273eSRahul Lakkireddy 	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
13792724273eSRahul Lakkireddy 		sizeof(vdd_hdr->name));
13802724273eSRahul Lakkireddy 	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
13812724273eSRahul Lakkireddy }
13822724273eSRahul Lakkireddy 
13832724273eSRahul Lakkireddy /**
138470e79866SAlexey Dobriyan  * vmcoredd_update_program_headers - Update all ELF program headers
13857efe48dfSRahul Lakkireddy  * @elfptr: Pointer to elf header
13867efe48dfSRahul Lakkireddy  * @elfnotesz: Size of elf notes aligned to page size
13877efe48dfSRahul Lakkireddy  * @vmcoreddsz: Size of device dumps to be added to elf note header
13887efe48dfSRahul Lakkireddy  *
138970e79866SAlexey Dobriyan  * Determine type of ELF header (Elf64 or Elf32) and update the elf note size.
13907efe48dfSRahul Lakkireddy  * Also update the offsets of all the program headers after the elf note header.
13917efe48dfSRahul Lakkireddy  */
vmcoredd_update_program_headers(char * elfptr,size_t elfnotesz,size_t vmcoreddsz)13927efe48dfSRahul Lakkireddy static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
13937efe48dfSRahul Lakkireddy 					    size_t vmcoreddsz)
13947efe48dfSRahul Lakkireddy {
13957efe48dfSRahul Lakkireddy 	unsigned char *e_ident = (unsigned char *)elfptr;
13967efe48dfSRahul Lakkireddy 	u64 start, end, size;
13977efe48dfSRahul Lakkireddy 	loff_t vmcore_off;
13987efe48dfSRahul Lakkireddy 	u32 i;
13997efe48dfSRahul Lakkireddy 
14007efe48dfSRahul Lakkireddy 	vmcore_off = elfcorebuf_sz + elfnotesz;
14017efe48dfSRahul Lakkireddy 
14027efe48dfSRahul Lakkireddy 	if (e_ident[EI_CLASS] == ELFCLASS64) {
14037efe48dfSRahul Lakkireddy 		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
14047efe48dfSRahul Lakkireddy 		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
14057efe48dfSRahul Lakkireddy 
14067efe48dfSRahul Lakkireddy 		/* Update all program headers */
14077efe48dfSRahul Lakkireddy 		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
14087efe48dfSRahul Lakkireddy 			if (phdr->p_type == PT_NOTE) {
14097efe48dfSRahul Lakkireddy 				/* Update note size */
14107efe48dfSRahul Lakkireddy 				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
14117efe48dfSRahul Lakkireddy 				phdr->p_filesz = phdr->p_memsz;
14127efe48dfSRahul Lakkireddy 				continue;
14137efe48dfSRahul Lakkireddy 			}
14147efe48dfSRahul Lakkireddy 
14157efe48dfSRahul Lakkireddy 			start = rounddown(phdr->p_offset, PAGE_SIZE);
14167efe48dfSRahul Lakkireddy 			end = roundup(phdr->p_offset + phdr->p_memsz,
14177efe48dfSRahul Lakkireddy 				      PAGE_SIZE);
14187efe48dfSRahul Lakkireddy 			size = end - start;
14197efe48dfSRahul Lakkireddy 			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
14207efe48dfSRahul Lakkireddy 			vmcore_off += size;
14217efe48dfSRahul Lakkireddy 		}
14227efe48dfSRahul Lakkireddy 	} else {
14237efe48dfSRahul Lakkireddy 		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
14247efe48dfSRahul Lakkireddy 		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
14257efe48dfSRahul Lakkireddy 
14267efe48dfSRahul Lakkireddy 		/* Update all program headers */
14277efe48dfSRahul Lakkireddy 		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
14287efe48dfSRahul Lakkireddy 			if (phdr->p_type == PT_NOTE) {
14297efe48dfSRahul Lakkireddy 				/* Update note size */
14307efe48dfSRahul Lakkireddy 				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
14317efe48dfSRahul Lakkireddy 				phdr->p_filesz = phdr->p_memsz;
14327efe48dfSRahul Lakkireddy 				continue;
14337efe48dfSRahul Lakkireddy 			}
14347efe48dfSRahul Lakkireddy 
14357efe48dfSRahul Lakkireddy 			start = rounddown(phdr->p_offset, PAGE_SIZE);
14367efe48dfSRahul Lakkireddy 			end = roundup(phdr->p_offset + phdr->p_memsz,
14377efe48dfSRahul Lakkireddy 				      PAGE_SIZE);
14387efe48dfSRahul Lakkireddy 			size = end - start;
14397efe48dfSRahul Lakkireddy 			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
14407efe48dfSRahul Lakkireddy 			vmcore_off += size;
14417efe48dfSRahul Lakkireddy 		}
14427efe48dfSRahul Lakkireddy 	}
14437efe48dfSRahul Lakkireddy }
14447efe48dfSRahul Lakkireddy 
14457efe48dfSRahul Lakkireddy /**
14467efe48dfSRahul Lakkireddy  * vmcoredd_update_size - Update the total size of the device dumps and update
144770e79866SAlexey Dobriyan  * ELF header
14487efe48dfSRahul Lakkireddy  * @dump_size: Size of the current device dump to be added to total size
14497efe48dfSRahul Lakkireddy  *
145070e79866SAlexey Dobriyan  * Update the total size of all the device dumps and update the ELF program
14517efe48dfSRahul Lakkireddy  * headers. Calculate the new offsets for the vmcore list and update the
14527efe48dfSRahul Lakkireddy  * total vmcore size.
14537efe48dfSRahul Lakkireddy  */
vmcoredd_update_size(size_t dump_size)14547efe48dfSRahul Lakkireddy static void vmcoredd_update_size(size_t dump_size)
14557efe48dfSRahul Lakkireddy {
14567efe48dfSRahul Lakkireddy 	vmcoredd_orig_sz += dump_size;
14577efe48dfSRahul Lakkireddy 	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
14587efe48dfSRahul Lakkireddy 	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
14597efe48dfSRahul Lakkireddy 					vmcoredd_orig_sz);
14607efe48dfSRahul Lakkireddy 
14617efe48dfSRahul Lakkireddy 	/* Update vmcore list offsets */
14627efe48dfSRahul Lakkireddy 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
14637efe48dfSRahul Lakkireddy 
14647efe48dfSRahul Lakkireddy 	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
14657efe48dfSRahul Lakkireddy 				      &vmcore_list);
14667efe48dfSRahul Lakkireddy 	proc_vmcore->size = vmcore_size;
14677efe48dfSRahul Lakkireddy }
14687efe48dfSRahul Lakkireddy 
14697efe48dfSRahul Lakkireddy /**
14702724273eSRahul Lakkireddy  * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
14712724273eSRahul Lakkireddy  * @data: dump info.
14722724273eSRahul Lakkireddy  *
14732724273eSRahul Lakkireddy  * Allocate a buffer and invoke the calling driver's dump collect routine.
147470e79866SAlexey Dobriyan  * Write ELF note at the beginning of the buffer to indicate vmcore device
14752724273eSRahul Lakkireddy  * dump and add the dump to global list.
14762724273eSRahul Lakkireddy  */
vmcore_add_device_dump(struct vmcoredd_data * data)14772724273eSRahul Lakkireddy int vmcore_add_device_dump(struct vmcoredd_data *data)
14782724273eSRahul Lakkireddy {
14792724273eSRahul Lakkireddy 	struct vmcoredd_node *dump;
14802724273eSRahul Lakkireddy 	void *buf = NULL;
14812724273eSRahul Lakkireddy 	size_t data_size;
14822724273eSRahul Lakkireddy 	int ret;
14832724273eSRahul Lakkireddy 
1484c6c40533SKairui Song 	if (vmcoredd_disabled) {
1485c6c40533SKairui Song 		pr_err_once("Device dump is disabled\n");
1486c6c40533SKairui Song 		return -EINVAL;
1487c6c40533SKairui Song 	}
1488c6c40533SKairui Song 
14892724273eSRahul Lakkireddy 	if (!data || !strlen(data->dump_name) ||
14902724273eSRahul Lakkireddy 	    !data->vmcoredd_callback || !data->size)
14912724273eSRahul Lakkireddy 		return -EINVAL;
14922724273eSRahul Lakkireddy 
14932724273eSRahul Lakkireddy 	dump = vzalloc(sizeof(*dump));
14942724273eSRahul Lakkireddy 	if (!dump) {
14952724273eSRahul Lakkireddy 		ret = -ENOMEM;
14962724273eSRahul Lakkireddy 		goto out_err;
14972724273eSRahul Lakkireddy 	}
14982724273eSRahul Lakkireddy 
14992724273eSRahul Lakkireddy 	/* Keep size of the buffer page aligned so that it can be mmaped */
15002724273eSRahul Lakkireddy 	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
15012724273eSRahul Lakkireddy 			    PAGE_SIZE);
15022724273eSRahul Lakkireddy 
15032724273eSRahul Lakkireddy 	/* Allocate buffer for driver's to write their dumps */
15042724273eSRahul Lakkireddy 	buf = vmcore_alloc_buf(data_size);
15052724273eSRahul Lakkireddy 	if (!buf) {
15062724273eSRahul Lakkireddy 		ret = -ENOMEM;
15072724273eSRahul Lakkireddy 		goto out_err;
15082724273eSRahul Lakkireddy 	}
15092724273eSRahul Lakkireddy 
15102724273eSRahul Lakkireddy 	vmcoredd_write_header(buf, data, data_size -
15112724273eSRahul Lakkireddy 			      sizeof(struct vmcoredd_header));
15122724273eSRahul Lakkireddy 
15132724273eSRahul Lakkireddy 	/* Invoke the driver's dump collection routing */
15142724273eSRahul Lakkireddy 	ret = data->vmcoredd_callback(data, buf +
15152724273eSRahul Lakkireddy 				      sizeof(struct vmcoredd_header));
15162724273eSRahul Lakkireddy 	if (ret)
15172724273eSRahul Lakkireddy 		goto out_err;
15182724273eSRahul Lakkireddy 
15192724273eSRahul Lakkireddy 	dump->buf = buf;
15202724273eSRahul Lakkireddy 	dump->size = data_size;
15212724273eSRahul Lakkireddy 
15222724273eSRahul Lakkireddy 	/* Add the dump to driver sysfs list */
15232724273eSRahul Lakkireddy 	mutex_lock(&vmcoredd_mutex);
15242724273eSRahul Lakkireddy 	list_add_tail(&dump->list, &vmcoredd_list);
15252724273eSRahul Lakkireddy 	mutex_unlock(&vmcoredd_mutex);
15262724273eSRahul Lakkireddy 
15277efe48dfSRahul Lakkireddy 	vmcoredd_update_size(data_size);
15282724273eSRahul Lakkireddy 	return 0;
15292724273eSRahul Lakkireddy 
15302724273eSRahul Lakkireddy out_err:
15312724273eSRahul Lakkireddy 	vfree(buf);
15322724273eSRahul Lakkireddy 	vfree(dump);
15332724273eSRahul Lakkireddy 
15342724273eSRahul Lakkireddy 	return ret;
15352724273eSRahul Lakkireddy }
15362724273eSRahul Lakkireddy EXPORT_SYMBOL(vmcore_add_device_dump);
15372724273eSRahul Lakkireddy #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
15382724273eSRahul Lakkireddy 
15392724273eSRahul Lakkireddy /* Free all dumps in vmcore device dump list */
vmcore_free_device_dumps(void)15402724273eSRahul Lakkireddy static void vmcore_free_device_dumps(void)
15412724273eSRahul Lakkireddy {
15422724273eSRahul Lakkireddy #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
15432724273eSRahul Lakkireddy 	mutex_lock(&vmcoredd_mutex);
15442724273eSRahul Lakkireddy 	while (!list_empty(&vmcoredd_list)) {
15452724273eSRahul Lakkireddy 		struct vmcoredd_node *dump;
15462724273eSRahul Lakkireddy 
15472724273eSRahul Lakkireddy 		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
15482724273eSRahul Lakkireddy 					list);
15492724273eSRahul Lakkireddy 		list_del(&dump->list);
15502724273eSRahul Lakkireddy 		vfree(dump->buf);
15512724273eSRahul Lakkireddy 		vfree(dump);
15522724273eSRahul Lakkireddy 	}
15532724273eSRahul Lakkireddy 	mutex_unlock(&vmcoredd_mutex);
15542724273eSRahul Lakkireddy #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
15552724273eSRahul Lakkireddy }
15562724273eSRahul Lakkireddy 
1557666bfddbSVivek Goyal /* Init function for vmcore module. */
vmcore_init(void)1558666bfddbSVivek Goyal static int __init vmcore_init(void)
1559666bfddbSVivek Goyal {
1560666bfddbSVivek Goyal 	int rc = 0;
1561666bfddbSVivek Goyal 
1562be8a8d06SMichael Holzheu 	/* Allow architectures to allocate ELF header in 2nd kernel */
1563be8a8d06SMichael Holzheu 	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1564be8a8d06SMichael Holzheu 	if (rc)
1565be8a8d06SMichael Holzheu 		return rc;
1566be8a8d06SMichael Holzheu 	/*
1567be8a8d06SMichael Holzheu 	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
1568be8a8d06SMichael Holzheu 	 * then capture the dump.
1569be8a8d06SMichael Holzheu 	 */
157085a0ee34SSimon Horman 	if (!(is_vmcore_usable()))
1571666bfddbSVivek Goyal 		return rc;
1572666bfddbSVivek Goyal 	rc = parse_crash_elf_headers();
1573666bfddbSVivek Goyal 	if (rc) {
157412b9d301SJianglei Nie 		elfcorehdr_free(elfcorehdr_addr);
157587ebdc00SAndrew Morton 		pr_warn("Kdump: vmcore not initialized\n");
1576666bfddbSVivek Goyal 		return rc;
1577666bfddbSVivek Goyal 	}
1578be8a8d06SMichael Holzheu 	elfcorehdr_free(elfcorehdr_addr);
1579be8a8d06SMichael Holzheu 	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1580666bfddbSVivek Goyal 
158197a32539SAlexey Dobriyan 	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
1582666bfddbSVivek Goyal 	if (proc_vmcore)
1583666bfddbSVivek Goyal 		proc_vmcore->size = vmcore_size;
1584666bfddbSVivek Goyal 	return 0;
1585666bfddbSVivek Goyal }
1586abaf3787SPaul Gortmaker fs_initcall(vmcore_init);
158716257393SMahesh Salgaonkar 
158816257393SMahesh Salgaonkar /* Cleanup function for vmcore module. */
vmcore_cleanup(void)158916257393SMahesh Salgaonkar void vmcore_cleanup(void)
159016257393SMahesh Salgaonkar {
159116257393SMahesh Salgaonkar 	if (proc_vmcore) {
1592a8ca16eaSDavid Howells 		proc_remove(proc_vmcore);
159316257393SMahesh Salgaonkar 		proc_vmcore = NULL;
159416257393SMahesh Salgaonkar 	}
159516257393SMahesh Salgaonkar 
159616257393SMahesh Salgaonkar 	/* clear the vmcore list. */
1597593bc695SAlexey Dobriyan 	while (!list_empty(&vmcore_list)) {
159816257393SMahesh Salgaonkar 		struct vmcore *m;
159916257393SMahesh Salgaonkar 
1600593bc695SAlexey Dobriyan 		m = list_first_entry(&vmcore_list, struct vmcore, list);
160116257393SMahesh Salgaonkar 		list_del(&m->list);
160216257393SMahesh Salgaonkar 		kfree(m);
160316257393SMahesh Salgaonkar 	}
1604f2bdacddSHATAYAMA Daisuke 	free_elfcorebuf();
16052724273eSRahul Lakkireddy 
16062724273eSRahul Lakkireddy 	/* clear vmcore device dump list */
16072724273eSRahul Lakkireddy 	vmcore_free_device_dumps();
160816257393SMahesh Salgaonkar }
1609