// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
struct vdso_data {
};
#endif

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char compat_vdso_start[], compat_vdso_end[];
#endif

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

enum rv_vdso_map {
	RV_VDSO_MAP_VVAR,
	RV_VDSO_MAP_VDSO,
};

#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

struct __vdso_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static void __init __vdso_init(struct __vdso_info *vdso_info)
{
	unsigned int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
		panic("vDSO is not a valid ELF object!\n");

	vdso_info->vdso_pages = (vdso_info->vdso_code_end -
				 vdso_info->vdso_code_start) >> PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info->vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		panic("vDSO kcalloc failed!\n");

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info->vdso_code_start);

	for (i = 0; i < vdso_info->vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS
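/*
 * Used by the generic time namespace code to locate struct vdso_data
 * within a vvar page: on RISC-V the data sits at the start of the page,
 * so a plain cast suffices.
 */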
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		if (vma_is_special_mapping(vma, vdso_info.dm))
			zap_vma_pages(vma);
#ifdef CONFIG_COMPAT
		if (vma_is_special_mapping(vma, compat_vdso_info.dm))
			zap_vma_pages(vma);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info vdso_info __ro_after_init = {
	.name = "vdso",
	.vdso_code_start = vdso_start,
	.vdso_code_end = vdso_end,
	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
	[RV_VDSO_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[RV_VDSO_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
	.name = "compat_vdso",
	.vdso_code_start = compat_vdso_start,
	.vdso_code_end = compat_vdso_end,
	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

static int __init vdso_init(void)
{
	__vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
	__vdso_init(&compat_vdso_info);
#endif

	return 0;
}
arch_initcall(vdso_init);

static int __setup_additional_pages(struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp,
				    struct __vdso_info *vdso_info)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += VVAR_SIZE;
	mm->context.vdso = (void *)vdso_base;
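	/*
	 * Map the vDSO text read/execute.  VM_MAYWRITE is needed so that a
	 * debugger such as gdb can set software breakpoints in vDSO code via
	 * ptrace(), which breaks COW on the affected text page instead of
	 * faulting.
	 */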
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_info->cm);

	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp,
				       &compat_vdso_info);
	mmap_write_unlock(mm);

	return ret;
}
#endif

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
	mmap_write_unlock(mm);

	return ret;
}