// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
#include <asm/vdso.h>
#endif

extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;
static struct page **vdso_pagelist;

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Build the list of struct pages backing the vDSO image, with the vDSO
 * data page appended as the final entry.
 */
static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		vdso_pagelist[i] = pg;
	}
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);

/*
 * Map the vDSO image followed by the vDSO data page into the address
 * space of a newly exec'd process.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;

	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	mmap_write_lock(mm);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;

	ret =
	   install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

	if (unlikely(ret)) {
		mm->context.vdso = NULL;
		goto end;
	}

	vdso_base += (vdso_pages << PAGE_SHIFT);
	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
		(VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);

	if (unlikely(ret))
		mm->context.vdso = NULL;
end:
	mmap_write_unlock(mm);
	return ret;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	if (vma->vm_mm && (vma->vm_start ==
			   (long)vma->vm_mm->context.vdso + PAGE_SIZE))
		return "[vdso_data]";
	return NULL;
}