// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c		kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
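
/*
 * Usage sketch: /proc/kcore exposes a read-only ELF core image of the
 * running kernel; it can typically be inspected with a debugger, e.g.
 * "gdb vmlinux /proc/kcore".
 */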

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

#ifndef kc_xlate_dev_mem_ptr
#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
{
	return __va(phys);
}
#endif
#ifndef kc_unxlate_dev_mem_ptr
#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
{
}
#endif

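/*
 * kclist_head holds the regions exported as PT_LOAD segments. Readers of
 * /proc/kcore take kclist_lock shared; kcore_update_ram() takes it
 * exclusively while it rebuilds the RAM entries.
 */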
static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

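/*
 * The /proc/kcore file layout is: ELF header, then nphdr program headers,
 * then the note segment, padded out to a page boundary (data_offset), and
 * finally the memory contents, each entry at data_offset plus its
 * kc_vaddr_to_offset() offset. The return value is the total file size.
 */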
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * With HIGHMEM, part of memory is _invisible_ to the kernel, so only the
 * directly mapped range is dumped. We can treat [0...max_low_pfn) as one
 * continuous range because low-memory holes are not as big as in the
 * !HIGHMEM case.
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Calculate the vmemmap address range for the given system RAM pfns and register it. */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (needed because of the page alignment above) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid((void *)ent->addr))
		goto free_out;

	/* Cut off any not-mapped area; taken from the ppc32 code. */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid, so we know this address
	 * is a valid pointer and can be compared against to determine
	 * whether we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized yet; update now. */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;

		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

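/*
 * Rebuild the KCORE_RAM (and KCORE_VMEMMAP) entries on kclist_head and
 * refresh the advertised file size. Replaced entries are moved to a
 * garbage list under the write lock and freed only after it is dropped.
 */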
static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

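/*
 * Lay down one ELF note: an elf_note header, the NUL-terminated name
 * padded to a 4-byte boundary, then the descriptor, also padded to 4
 * bytes, with *i tracking the running offset into the notes buffer.
 */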
static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

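/*
 * Read handler. The file offset selects which part of the synthetic ELF
 * image to emit: the ELF header, the program headers, the note segment,
 * and finally the memory contents of the matching kcore_list entry,
 * copied out one page-bounded chunk (tsz) at a time.
 */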
static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	char *buf = file->private_data;
	loff_t *fpos = &iocb->ki_pos;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t page_offline_frozen = 1;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t buflen = iov_iter_count(iter);
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
			ret = -EFAULT;
			goto out;
		}

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
				 iter) != tsz) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strscpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;
		phys_addr_t phys;
		void *__start;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *iter;

			m = NULL;
			list_for_each_entry(iter, &kclist_head, list) {
				if (start >= iter->addr &&
				    start < iter->addr + iter->size) {
					m = iter;
					break;
				}
			}
		}

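		/*
		 * Thaw periodically so page offlining is not blocked for the
		 * whole read, and give the scheduler a chance to run.
		 */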
		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}

		switch (m->type) {
		case KCORE_VMALLOC:
		{
			const char *src = (char *)start;
			size_t read = 0, left = tsz;

			/*
			 * vmalloc uses spinlocks, so we optimistically try to
			 * read memory. If this fails, fault pages in and try
			 * again until we are done.
			 */
			while (true) {
				read += vread_iter(iter, src, left);
				if (read == tsz)
					break;

				src += read;
				left -= read;

				if (fault_in_iov_iter_writeable(iter, left)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		}
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			phys = __pa(start);
			pfn = phys >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			if (m->type == KCORE_RAM) {
				__start = kc_xlate_dev_mem_ptr(phys);
				if (!__start) {
					ret = -ENOMEM;
					if (iov_iter_zero(tsz, iter) != tsz)
						ret = -EFAULT;
					goto out;
				}
			} else {
				__start = (void *)start;
			}

			/*
			 * Sadly we must use a bounce buffer here to be able to
			 * make use of copy_from_kernel_nofault(), as these
			 * memory regions might not always be mapped on all
			 * architectures.
			 */
			ret = copy_from_kernel_nofault(buf, __start, tsz);
			if (m->type == KCORE_RAM)
				kc_unxlate_dev_mem_ptr(phys, __start);
			if (ret) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				/* Zeroed the unreadable range; don't fail the read. */
				ret = 0;
			/*
			 * We know the bounce buffer is safe to copy from, so
			 * use _copy_to_iter() directly.
			 */
			} else if (_copy_to_iter(buf, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

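	/* Per-open bounce buffer used by read_kcore_iter() for nofault copies. */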
	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read_iter	= read_kcore_iter,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, the architecture maps kernel text through a special segment
 * rather than the direct-map area, so a dedicated TEXT entry is needed.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The modules range only needs its own entry when it does not coincide
 * with the vmalloc range.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			   MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI);

	return 0;
}
fs_initcall(proc_kcore_init);