// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Imagination Technologies
 * Author: Alex Smith <alex.smith@imgtec.com>
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>

#include <asm/abi.h>
#include <asm/mips-cps.h>
#include <asm/page.h>
#include <asm/vdso.h>

/* Kernel-provided data used by the VDSO. */
static union mips_vdso_data vdso_data __page_aligned_data;

/*
 * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
 * what we map and where within the area they are mapped is determined at
 * runtime.
 */
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
	.name = "[vvar]",
	.pages = no_pages,
};

/* Fill in the image's page array with the kernel pages backing its data. */
static void __init init_vdso_image(struct mips_vdso_image *image)
{
	unsigned long num_pages, i;
	unsigned long data_pfn;

	BUG_ON(!PAGE_ALIGNED(image->data));
	BUG_ON(!PAGE_ALIGNED(image->size));

	num_pages = image->size / PAGE_SIZE;

	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
	for (i = 0; i < num_pages; i++)
		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image);

#ifdef CONFIG_MIPS32_O32
	init_vdso_image(&vdso_image_o32);
#endif

#ifdef CONFIG_MIPS32_N32
	init_vdso_image(&vdso_image_n32);
#endif

	return 0;
}
subsys_initcall(init_vdso);

void update_vsyscall(struct timekeeper *tk)
{
	vdso_data_write_begin(&vdso_data);

	vdso_data.xtime_sec = tk->xtime_sec;
	vdso_data.xtime_nsec = tk->tkr_mono.xtime_nsec;
	vdso_data.wall_to_mono_sec = tk->wall_to_monotonic.tv_sec;
	vdso_data.wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec;
	vdso_data.cs_shift = tk->tkr_mono.shift;

	vdso_data.clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode;
	if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
		vdso_data.cs_mult = tk->tkr_mono.mult;
		vdso_data.cs_cycle_last = tk->tkr_mono.cycle_last;
		vdso_data.cs_mask = tk->tkr_mono.mask;
	}

	vdso_data_write_end(&vdso_data);
}

void update_vsyscall_tz(void)
{
	if (vdso_data.clock_mode != VDSO_CLOCK_NONE) {
		vdso_data.tz_minuteswest = sys_tz.tz_minuteswest;
		vdso_data.tz_dsttime = sys_tz.tz_dsttime;
	}
}

static unsigned long vdso_base(void)
{
	unsigned long base;

	/* Skip the delay slot emulation page */
	base = STACK_TOP + PAGE_SIZE;

	if (current->flags & PF_RANDOMIZE) {
		base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
		base = PAGE_ALIGN(base);
	}

	return base;
}

/*
 * Map the delay slot emulation page, the [vvar] area (optional GIC user page
 * followed by the vdso_data page) and the VDSO image into the new process's
 * address space.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mips_vdso_image *image = current->thread.abi->vdso;
	struct mm_struct *mm = current->mm;
	unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn;
	struct vm_area_struct *vma;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/* Map delay slot emulation page */
	base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
			   VM_READ | VM_EXEC |
			   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
			   0, NULL);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	/*
	 * Determine total area size. This includes the VDSO data itself, the
	 * data page, and the GIC user page if present. Always create a mapping
	 * for the GIC user area if the GIC is present regardless of whether it
	 * is the current clocksource, in case it comes into use later on. We
	 * only map a page even though the total area is 64K, as we only need
	 * the counter registers at the start.
	 */
	gic_size = mips_gic_present() ? PAGE_SIZE : 0;
	vvar_size = gic_size + PAGE_SIZE;
	size = vvar_size + image->size;

	/*
	 * Find a region that's large enough for us to perform the
	 * colour-matching alignment below.
	 */
	if (cpu_has_dc_aliases)
		size += shm_align_mask + 1;

	base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	/*
	 * If we suffer from dcache aliasing, ensure that the VDSO data page
	 * mapping is coloured the same as the kernel's mapping of that memory.
	 * This ensures that when the kernel updates the VDSO data userland
	 * will observe it without requiring cache invalidations.
	 */
	if (cpu_has_dc_aliases) {
		base = __ALIGN_MASK(base, shm_align_mask);
		base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
	}

	data_addr = base + gic_size;
	vdso_addr = data_addr + PAGE_SIZE;

	vma = _install_special_mapping(mm, base, vvar_size,
				       VM_READ | VM_MAYREAD,
				       &vdso_vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Map GIC user page. */
	if (gic_size) {
		gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT;

		ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
					 pgprot_noncached(PAGE_READONLY));
		if (ret)
			goto out;
	}

	/* Map data page. */
	ret = remap_pfn_range(vma, data_addr,
			      virt_to_phys(&vdso_data) >> PAGE_SHIFT,
			      PAGE_SIZE, PAGE_READONLY);
	if (ret)
		goto out;

	/* Map VDSO image. */
	vma = _install_special_mapping(mm, vdso_addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	up_write(&mm->mmap_sem);
	return ret;
}