/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

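/*
 * Walk the ptes mapped by @pmd over [@addr, @end) and apply @newprot to
 * every present entry.  For prot_numa updates, zero/KSM pages and ptes
 * that are already PROT_NONE are skipped to avoid pointless faults and
 * TLB flushes.  With @dirty_accountable set, already-dirty ptes may be
 * left writable so accounted dirty pages do not take an extra write
 * fault.  Non-present ptes holding write migration entries are
 * downgraded to read migration entries.  Returns the number of ptes
 * that were updated.
 */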
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

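/*
 * Walk the pmds under @pud.  A transparent huge pmd that spans the whole
 * pmd range is updated in place by change_huge_pmd(); one that is only
 * partially covered is split and falls through to the pte loop.  The mmu
 * notifier range is only started once the first populated pmd is found,
 * so walking empty ranges does not invoke notifiers.
 */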
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

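/*
 * Walk the puds covering [@addr, @end) and hand every populated entry
 * down to change_pmd_range().  Returns the number of pages updated.
 */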
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

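/*
 * Top-level page table walk for one vma: flush the cache range, mark a
 * TLB flush as pending on the mm, walk the pgds, then flush the TLB at
 * the end only if some entries were actually changed.
 */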
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

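/*
 * Apply @newprot to the range [@start, @end) of @vma.  hugetlb vmas are
 * handled by hugetlb_change_protection(); everything else goes through
 * the generic page table walk.  Returns the number of pages updated.
 */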
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);

	return pages;
}

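/*
 * Apply @newflags to the range [@start, @end) of @vma: charge memory
 * commit if a private mapping becomes writable, try to merge with the
 * neighbouring vmas, split @vma if the range covers only part of it,
 * then update vm_flags/vm_page_prot and change the page protections.
 * Must be called with mmap_sem held for writing; *@pprev is set to the
 * vma that covers the range on success.
 */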
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
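	/*
	 * Shared writable mappings that want write notifications are kept
	 * write-protected in vm_page_prot; dirty_accountable then lets
	 * change_protection() leave already-dirty ptes writable so they
	 * do not take an extra write fault.
	 */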
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

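/*
 * mprotect(2): validate the arguments, translate @prot into VM_* flags
 * (honouring READ_IMPLIES_EXEC), then walk every vma covering
 * [start, start+len) and apply the new protection via mprotect_fixup().
 * A hole in the range makes the call fail with -ENOMEM.
 */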
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
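	/*
	 * PROT_GROWSDOWN extends the range down to the start of a stack
	 * vma that grows down; PROT_GROWSUP extends it up to the end of a
	 * vma that grows up.  Either flag on a vma without the matching
	 * VM_GROWS* flag is rejected with -EINVAL.
	 */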
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/*
		 * newflags >> 4 shifts VM_MAY% in place of VM_%, so each
		 * requested permission must have its VM_MAY* bit set.
		 */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}