xref: /openbmc/linux/arch/s390/kernel/vdso.c (revision afba8b0a)
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <vdso/datapage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
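
/*
 * Note: vdso64_start and vdso64_end are not declared in a header here;
 * they bracket the prebuilt vDSO image that the build links into the
 * kernel (typically by .incbin'ing vdso64.so from an assembler wrapper),
 * so vdso64_kbase points at the first byte of that image.
 */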

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

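/*
 * Fault handler for the "[vdso]" special mapping: there is no backing
 * file, so a fault is resolved by looking up the kernel page that backs
 * the faulting offset and returning it with an extra reference.
 */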
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

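/*
 * mremap() hook for the special mapping: the kernel caches the vDSO
 * location in mm->context.vdso_base and hands it to userspace via the
 * auxiliary vector, so when the mapping is moved (CRIU does this during
 * restore, for example) the cached base address must follow it.
 */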
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);
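
/*
 * Usage note: "vdso=" accepts the usual kstrtobool spellings, so booting
 * with e.g. "vdso=0" or "vdso=off" on the kernel command line prevents
 * the vDSO from being mapped into user processes, while unparseable
 * values leave vdso_enabled unchanged.
 */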

/*
 * The vdso data page
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;
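/*
 * The union above is the usual idiom for padding struct vdso_data out to
 * exactly PAGE_SIZE: the page-aligned object can then be mapped into
 * userspace as a whole page without exposing adjacent kernel data.
 */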

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2
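/*
 * Order 2 because an s390 segment table has _CRST_ENTRIES (2048)
 * eight-byte entries, i.e. 16KB, or four 4KB pages.
 */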

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	/*
	 * Any of the three allocations may have failed; freeing a zero
	 * address is a no-op, so the unconditional cleanup is safe.
	 */
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}
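
/*
 * Rough sketch of what vdso_alloc_per_cpu() wires up, assuming the usual
 * s390 DAT layout; only the first entry of each table is made valid, so
 * the per-cpu data page is the sole thing reachable through this address
 * space:
 *
 *	lowcore->vdso_asce
 *	  -> segment table (16KB, entry 0 -> page table)
 *	       -> page table (entry 0, _PAGE_PROTECT -> data frame)
 *	            -> per-cpu vdso data page (read-only for userspace)
 */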

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	/* Walk the tables back from the ASCE to find what to free */
	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf. We create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

	if (is_compat_task())
		return 0;

	vdso_pages = vdso64_pages;
	/*
	 * If the vDSO had a problem and was disabled, just don't "enable"
	 * it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in the process address space;
	 * get_unmapped_area() chooses a suitable free range for us.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it is then your responsibility to never do
	 * that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead.  It's fine to use it for setting breakpoints in
	 * the vDSO code pages, though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	mmap_write_unlock(mm);
	return rc;
}
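
/*
 * The vdso_base recorded above answers the question posed at the top of
 * this file: the s390 ELF loader exports it to userspace as the
 * AT_SYSINFO_EHDR auxiliary vector entry (see ARCH_DLINFO in asm/elf.h),
 * which is where glibc picks it up.
 */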

static int __init vdso_init(void)
{
	int i;

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
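	/*
	 * The "+ 1" accounts for the shared vdso_data page; the page list
	 * built below is laid out as [code pages...][vdso_data][NULL],
	 * which is exactly what vdso_fault() indexes by vmf->pgoff.
	 */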

	/* Take references on the vDSO code pages and fill in the page list */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

	/* Hold a reference on the shared data page as well */
	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
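
/*
 * For reference, a minimal userspace sketch (not part of this file) of
 * how a C library finds the mapping established above; it assumes glibc
 * >= 2.16 for getauxval():
 *
 *	#include <elf.h>
 *	#include <sys/auxv.h>
 *
 *	Elf64_Ehdr *vdso = (Elf64_Ehdr *) getauxval(AT_SYSINFO_EHDR);
 *	// vdso now points at the ELF header of the "[vdso]" mapping,
 *	// i.e. at mm->context.vdso_base of the calling process.
 */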