xref: /openbmc/linux/arch/mips/mm/mmap.c (revision 9c1f8594)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

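/*
 * Minimum alignment for shared mappings.  PAGE_SIZE - 1 is the "sane
 * caches" default; platforms whose caches can alias virtually are
 * expected to raise this at boot so that mappings of the same page
 * share a cache colour.
 */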
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP		(128*1024*1024UL)
#define MAX_GAP		((TASK_SIZE)/6*5)

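/*
 * Fall back to the legacy bottom-up layout if the personality asks for
 * it, if the stack may grow without bound (a top-down mmap base cannot
 * be sized against an unlimited stack), or if the legacy sysctl is set.
 */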
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

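/*
 * For the top-down layout, place the mmap base a stack-rlimit-sized gap
 * (clamped to [MIN_GAP, MAX_GAP]) below TASK_SIZE, lowered further by
 * the ASLR offset.
 */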
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

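/*
 * Cache colouring: give a candidate address the same colour (offset
 * within shm_align_mask) as the file offset being mapped, so that
 * shared mappings never alias in a virtually-indexed cache.
 * COLOUR_ALIGN_DOWN never returns an address above the one passed in.
 */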
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	return base - off;
}

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction { UP, DOWN };

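/*
 * Common body of the bottom-up and top-down allocators: validate
 * MAP_FIXED requests and address hints, then walk the VMA list in the
 * requested direction looking for a large enough, correctly coloured
 * gap.
 */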
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

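	/* No usable hint: scan the address space in the requested direction. */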
	if (dir == UP) {
		addr = mm->mmap_base;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
			/* At this point:  (!vma || addr < vma->vm_end). */
			if (TASK_SIZE - len < addr)
				return -ENOMEM;
			if (!vma || addr + len <= vma->vm_start)
				return addr;
			addr = vma->vm_end;
			if (do_color_align)
				addr = COLOUR_ALIGN(addr, pgoff);
		}
	} else {
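		/*
		 * Top-down path: start from the cached hint below
		 * mm->mmap_base and search toward lower addresses.
		 */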
		/* check if free_area_cache is useful for us */
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/*
		 * Either no address was requested, or the mapping can't
		 * fit into the requested address hole.
		 */
		addr = mm->free_area_cache;
		if (do_color_align) {
			unsigned long base =
				COLOUR_ALIGN_DOWN(addr - len, pgoff);

			addr = base + len;
		}

		/* make sure it can fit in the remaining address space */
		if (likely(addr > len)) {
			vma = find_vma(mm, addr - len);
			if (!vma || addr <= vma->vm_start) {
				/* remember the address as a hint for next time */
				return mm->free_area_cache = addr - len;
			}
		}

		if (unlikely(mm->mmap_base < len))
			goto bottomup;

		addr = mm->mmap_base - len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);

		do {
			/*
			 * Lookup failure means no vma is above this address,
			 * else if new region fits below vma->vm_start,
			 * return with success:
			 */
			vma = find_vma(mm, addr);
			if (likely(!vma || addr + len <= vma->vm_start)) {
				/* remember the address as a hint for next time */
				return mm->free_area_cache = addr;
			}

			/* remember the largest hole we saw so far */
			if (addr + mm->cached_hole_size < vma->vm_start)
				mm->cached_hole_size = vma->vm_start - addr;

			/* try just below the current vma->vm_start */
			addr = vma->vm_start - len;
			if (do_color_align)
				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
		} while (likely(len < vma->vm_start));

bottomup:
		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
		mm->cached_hole_size = ~0UL;
		mm->free_area_cache = TASK_UNMAPPED_BASE;
		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
		/* Restore the topdown base: */
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;

		return addr;
	}
}

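/* Bottom-up entry point; installed as ->get_unmapped_area for the legacy layout. */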
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function
 * as extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

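/*
 * Choose the mmap layout for a new mm and apply ASLR: the masks below
 * allow up to 16MB of mmap-base randomization on 32-bit tasks and
 * 256MB on 64-bit tasks, in page-sized steps.
 */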
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		random_factor = random_factor << PAGE_SHIFT;
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

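/* Random offset for the initial brk, in page-sized steps. */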
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_int();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}

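/*
 * Randomize the heap start.  If adding the random offset overflows and
 * wraps below the current brk, return the unrandomized value instead.
 */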
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}