// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

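/* Start and end of the 64-bit and compat vdso images. */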
extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

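/*
 * Statically allocated vdso data: one page-aligned page holding a
 * struct vdso_data per clock base (CS_BASES), shared by all processes.
 */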
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

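/* Page offsets within the [vvar] mapping; must stay in sync with __VVAR_PAGES. */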
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
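/* Return the vdso data stored in a given vvar page, for the generic timens code. */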
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_page_range(vma, vma->vm_start, size);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

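/*
 * Fault handler for the [vvar] mapping: install the plain vdso data page,
 * or the swapped layout if the task is in a non-root time namespace.
 */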
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Pre-fault the real vdso data page at the timens
			 * slot as well, since the clock code will access it
			 * anyway to read the clock data.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

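/* Keep context.vdso_base up to date when userspace moves the vdso with mremap(). */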
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

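/*
 * Store the CPU number in the TOD clock programmable field, so that the
 * vdso getcpu() code can retrieve it via STORE CLOCK EXTENDED without
 * entering the kernel.
 */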
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

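/*
 * Map the vvar pages immediately followed by the vdso text for the
 * current ABI:
 *
 *   | vvar data page | vvar timens page | vdso text ...
 *   ^ vvar_start                        ^ vdso_text_start
 */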
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		/* Tear down the whole vvar area, not just its first page. */
		do_munmap(mm, vvar_start, VVAR_NR_PAGES * PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

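/*
 * Pick a randomized, page-aligned base address for the vdso above the
 * given start address (derived from the stack top) and below VDSO_BASE:
 * any page in [start, end - len] may be chosen with equal probability.
 */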
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

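/* Total mapping size: the vvar pages plus the vdso text of the current ABI. */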
unsigned long vdso_size(void)
{
	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;

	if (is_compat_task())
		size += vdso32_end - vdso32_start;
	else
		size += vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

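/*
 * Called at ELF load time to set up the vdso mapping; the base address
 * is randomized unless address space randomization is disabled for the
 * task.
 */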
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

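/* Build the NULL-terminated page list backing a vdso image. */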
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

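/* Set up the page lists for the vdso images once at boot. */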
static int __init vdso_init(void)
{
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);