/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
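
/*
 * A worked example of the colouring, assuming SHMLBA == 0x4000 (four
 * 4 KiB pages, typical for aliasing ARM VIPT caches) and PAGE_SHIFT
 * == 12; the input values are hypothetical:
 *
 *	COLOUR_ALIGN(0x00012345, 3)
 *	  = ((0x00012345 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *	  = 0x00014000 + 0x00003000
 *	  = 0x00017000
 *
 * 0x00017000 % SHMLBA == (3 << PAGE_SHIFT) % SHMLBA == 0x3000, so the
 * aligned address shares a cache colour with page 3 of the object.
 */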

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
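
/*
 * Illustrative triggers for the legacy layout (the commands are
 * hypothetical examples): "setarch -L" sets ADDR_COMPAT_LAYOUT in the
 * personality, "ulimit -s unlimited" makes RLIMIT_STACK infinite, and
 * "sysctl vm.legacy_va_layout=1" flips the final fallback check.
 */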

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
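
/*
 * A worked example, assuming TASK_SIZE == 0xC0000000 (a 3 GiB user
 * split) with a hypothetical 8 MiB stack rlimit and rnd == 0:
 *
 *	gap  = 8 MiB, clamped up to MIN_GAP = 128 MiB = 0x08000000
 *	base = PAGE_ALIGN(0xC0000000 - 0x08000000 - 0) = 0xB8000000
 *
 * Top-down allocations then start just below 0xB8000000, leaving at
 * least 128 MiB below TASK_SIZE for the stack to grow into.
 */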

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at an address with the same colour,
 * i.e. at the same offset modulo SHMLBA bytes.
 *
 * We provide this function unconditionally; in the VIVT case,
 * however, the alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
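
/*
 * An illustrative userspace consequence of the MAP_FIXED check above,
 * on a system with an aliasing VIPT cache and SHMLBA == 0x4000 (the
 * file name and addresses are hypothetical):
 *
 *	int fd = open("/tmp/f", O_RDWR);
 *	void *p = mmap((void *)0x00011000, 0x1000,
 *		       PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_FIXED, fd, 0);
 *
 * Here pgoff == 0 but 0x00011000 & (SHMLBA - 1) == 0x1000, so the
 * mapping could not share a cache colour with offset 0 of the file;
 * mmap() fails with EINVAL.  Page alignment is not enough: the
 * address must be colour-aligned too.
 */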

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.  Error returns such as -ENOMEM are never
	 * page-aligned, which is what the check below relies on.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
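
/*
 * How the alignment fields are consumed, sketched under the assumed
 * contract of vm_unmapped_area(): it only returns addresses with
 *
 *	(addr & info.align_mask) == (info.align_offset & info.align_mask)
 *
 * so with align_mask == PAGE_MASK & (SHMLBA - 1) and align_offset ==
 * pgoff << PAGE_SHIFT, every address it hands back has the same cache
 * colour that COLOUR_ALIGN() would have produced on the hint path.
 */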

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
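
/*
 * For example, assuming mmap_rnd_bits == 8 (the ARM architecture
 * minimum) and 4 KiB pages, rnd lies in [0, 255] pages, so the mmap
 * base is randomised over a window just under 1 MiB.
 */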

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
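
/*
 * Sketch of the two resulting layouts (not to scale):
 *
 *	legacy:  mmap_base = TASK_UNMAPPED_BASE + random_factor;
 *		 mappings grow upwards, towards the stack at the top
 *		 of user space.
 *	default: mmap_base = PAGE_ALIGN(TASK_SIZE - gap - rnd);
 *		 mappings grow downwards, towards the heap, keeping
 *		 the clamped stack gap free at the top.
 */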

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
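
/*
 * Worked examples with hypothetical values: PHYS_OFFSET == 0x80000000
 * and 512 MiB of lowmem, so __pa(high_memory - 1) + 1 == 0xA0000000:
 *
 *	valid_phys_addr_range(0x7fff0000, 0x1000) -> 0, below RAM
 *	valid_phys_addr_range(0x9ffff000, 0x1000) -> 1, last lowmem page
 *	valid_phys_addr_range(0x9ffff000, 0x2000) -> 0, runs past the end
 */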

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
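
/*
 * For example, assuming a 32-bit PHYS_MASK (0xffffffff) and 4 KiB
 * pages, the first invalid pfn is 0x100000:
 *
 *	valid_mmap_phys_addr_range(0x000fffff, 0x1000) -> 1, last page
 *	valid_mmap_phys_addr_range(0x00100000, 0x1000) -> 0, out of range
 */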

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
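
/*
 * The resulting policy, summarised:
 *
 *	pfn in an exclusive resource -> 0, access denied
 *	pfn in system RAM            -> 0, access denied
 *	anything else (plain MMIO)   -> 1, access allowed
 */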

#endif