xref: /openbmc/linux/mm/mprotect.c (revision 7da4d641c58d201c3cc1835c05ca1a7fa26f0856)
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

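/*
 * Fallback for architectures that do not supply their own pgprot_modify():
 * simply adopt the new protection bits wholesale.
 */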
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

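/*
 * Rewrite every pte mapped below one pmd entry with the new protection,
 * holding the page table lock.  When dirty accounting is in use, ptes that
 * are already dirty are made writable up front so no write fault is taken
 * later; write migration entries are downgraded to read-only ones.
 * Returns the number of ptes updated.
 */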
static unsigned long change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

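/*
 * Walk the pmd entries under one pud.  Transparent huge pmds are either
 * changed in place or split first; everything else is handed down to
 * change_pte_range().  Returns the number of pages whose protection changed.
 */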
static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma->vm_mm, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot)) {
				pages += HPAGE_PMD_NR;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
				 dirty_accountable);
	} while (pmd++, addr = next, addr != end);

	return pages;
}

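/*
 * Walk the pud entries under one pgd, skipping holes, and let
 * change_pmd_range() do the real work.
 */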
static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable);
	} while (pud++, addr = next, addr != end);

	return pages;
}

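/*
 * Walk the page tables for [addr, end) within vma, rewriting protections at
 * every level, flushing caches before the walk and the TLB afterwards.
 * Returns the number of pages affected.
 */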
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable);
	} while (pgd++, addr = next, addr != end);

	flush_tlb_range(vma, start, end);

	return pages;
}

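/*
 * Change the protection over [start, end): bracket the update with mmu
 * notifier invalidation calls and dispatch either to the hugetlb helper or
 * to the generic page table walk.  Returns the number of pages affected.
 */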
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

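/*
 * Apply newflags to the stretch [start, end) of vma: charge a newly
 * writable private mapping against the commit limit, try to merge with
 * neighbouring vmas or split off the affected range, then rewrite the page
 * protections and update the accounting statistics.  On success *pprev
 * points to the vma that now covers the range.
 */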
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

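/*
 * mprotect(2): validate the request, take mmap_sem for writing and apply
 * the new protection vma by vma via mprotect_fixup(), stopping at the
 * first hole or failure.
 */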
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY* into the VM_* positions */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}