xref: /openbmc/linux/arch/mips/mm/mmap.c (revision d2574c33)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
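
/*
 * Note: shm_align_mask starts out as PAGE_SIZE - 1 (no aliasing constraint)
 * and is grown by the boot-time cache probe on CPUs whose virtually indexed
 * D-cache ways span more than one page.  Illustrative value, assuming 4KB
 * pages and a 32KB, 4-way VIPT D-cache (8KB per way):
 *
 *	shm_align_mask = 0x1fff;	two page colours per way
 */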

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
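
/*
 * Worked example (illustrative, assuming a 32-bit task with
 * TASK_SIZE = 0x80000000 and PAGE_SIZE = 4KB):
 *
 *	rlim_cur = 8MB	-> gap is clamped up to MIN_GAP = 0x08000000 (128MB)
 *	rnd	 = 0x00400000	(4MB ASLR slide)
 *
 *	mmap_base = PAGE_ALIGN(0x80000000 - 0x08000000 - 0x00400000)
 *		  = 0x77c00000
 */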

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
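
/*
 * COLOUR_ALIGN() rounds addr up to the next colour boundary, then adds the
 * colour dictated by the file offset, so the chosen virtual address aliases
 * to the same cache index as pgoff.  Illustrative arithmetic, assuming
 * PAGE_SHIFT = 12 and shm_align_mask = 0x3fff (16KB way size):
 *
 *	addr = 0x2aaab123, pgoff = 5
 *	((0x2aaab123 + 0x3fff) & ~0x3fff)	= 0x2aaac000
 *	((5 << 12) & 0x3fff)			= 0x1000
 *	COLOUR_ALIGN(addr, pgoff)		= 0x2aaad000
 */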

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	/* only the colour bits above the page offset constrain the search */
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		/* a page-aligned return value is an address, not an errno */
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function as
 * extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (TASK_IS_32BIT_ADDR)
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_COMPAT */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
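
/*
 * Illustrative entropy bound, assuming PAGE_SHIFT = 12 and mmap_rnd_bits = 8
 * (the real width comes from CONFIG_ARCH_MMAP_RND_BITS and may be changed
 * via the vm.mmap_rnd_bits sysctl):
 *
 *	rnd = get_random_long() & 0xff	-> 256 possible page offsets
 *	rnd << PAGE_SHIFT		-> mmap_base slides by up to 0xff000
 */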

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
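
/*
 * Layout summary (illustrative): the legacy layout puts mmap_base just above
 * TASK_UNMAPPED_BASE (plus the random slide) and hands out addresses
 * bottom-up towards the stack; the default layout puts mmap_base below
 * TASK_SIZE by the stack rlimit (clamped between MIN_GAP and MAX_GAP) plus
 * the random slide, and hands out addresses top-down, falling back to a
 * bottom-up search only if the top-down search fails.
 */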

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_long();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}
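
/*
 * The masks above are applied after the PAGE_SHIFT shift, so the offset is
 * always page aligned.  Illustrative bounds, assuming PAGE_SHIFT = 12:
 *
 *	32-bit: rnd & 0x7fffff	-> multiples of 4KB up to 8MB - 4KB
 *	64-bit: rnd & 0xfffffff	-> multiples of 4KB up to 256MB - 4KB
 */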

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	/* guard against wraparound near the top of the address space */
	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
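
/*
 * Usage sketch (illustrative): code handed an arbitrary kernel pointer can
 * check that it resolves to a valid page frame before treating it as
 * directly mapped memory:
 *
 *	if (!__virt_addr_valid(ptr))
 *		return -EFAULT;
 */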
209