xref: /openbmc/linux/arch/s390/mm/mmap.c (revision 01042607)
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

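/*
 * Maximum number of bytes of randomness applied on top of the stack
 * base; zero when randomization is disabled for this task, either via
 * PF_RANDOMIZE being clear or the ADDR_NO_RANDOMIZE personality flag.
 */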
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

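/*
 * Use the legacy bottom-up layout if the task requests it through the
 * ADDR_COMPAT_LAYOUT personality, if the stack rlimit is unlimited (so
 * no sensible gap below the stack can be reserved), or if it is
 * selected system-wide through the legacy_va_layout sysctl.
 */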
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

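/*
 * Page-aligned random offset added to the mmap base; the amount of
 * entropy is bounded by MMAP_RND_MASK.
 */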
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

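/*
 * Bottom-up layout: the mmap area starts at TASK_UNMAPPED_BASE plus
 * the random offset.
 */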
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

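/*
 * Top-down layout: place the mmap base below the stack, leaving room
 * for the stack rlimit (clamped to MIN_GAP..MAX_GAP), the maximum
 * stack randomization and the random mmap offset.
 */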
static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}

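/*
 * Bottom-up search for a free range between mm->mmap_base and
 * TASK_SIZE.  File-backed and shared mappings are placed so that the
 * virtual address and the file offset agree modulo
 * (MMAP_ALIGN_MASK + 1) pages.
 */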
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

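/*
 * Top-down search for a free range below mm->mmap_base, using the same
 * alignment rules as the bottom-up variant above.  If the top-down
 * search fails, retry bottom-up over the full address space instead of
 * failing the mmap() outright.
 */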
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

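/*
 * Upgrade to 4-level page tables up front if a MAP_FIXED request
 * reaches beyond the current 3-level TASK_SIZE, or if the requested
 * length alone cannot fit below it.  Nothing to do for compat tasks or
 * once the address space is already at its maximum size.
 */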
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE)
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm);
	return 0;
}

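/*
 * Wrapper around arch_get_unmapped_area(): if the search fails with
 * -ENOMEM and the task is still restricted to 3-level page tables,
 * upgrade to 4 levels and retry once with the enlarged address space.
 */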
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

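/* Top-down variant of the wrapper above, with the same retry logic. */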
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}