xref: /openbmc/linux/mm/mprotect.c (revision 04c71976)
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

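/*
 * Rewrite the protection bits of every pte mapping [addr, end) under one
 * pmd.  The walk runs with the page-table lock held and inside
 * arch_enter/leave_lazy_mmu_mode() so architectures can batch the updates.
 */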
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware-updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = ptep_get_and_clear(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);
			set_pte_at(mm, addr, pte, ptent);
#ifdef CONFIG_MIGRATION
		} else if (!pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
#endif
		}

	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

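/*
 * change_pmd_range() and change_pud_range() each walk one level of the
 * page-table tree: they clamp every step with pmd_addr_end()/pud_addr_end(),
 * skip entries that are empty or bad, and hand the leaf work down to
 * change_pte_range().
 */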
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
	} while (pud++, addr = next, addr != end);
}

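/*
 * Top of the walk: flush the cache for the range, rewrite the protections
 * pgd by pgd, then flush the TLB for the whole range so no CPU keeps a
 * stale translation with the old permissions.
 */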
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

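/*
 * Apply newflags to the part of vma between start and end: charge any new
 * commit, try to merge with the neighbouring vmas, split off an unchanged
 * head and/or tail, then rewrite vm_page_prot and the page tables.  When
 * the mapping wants write notifications the pages are kept read-only, and
 * dirty_accountable lets change_pte_range() make already-dirty ptes
 * writable so they do not take needless write faults.
 */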
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = protection_map[newflags &
		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = protection_map[newflags &
			(VM_READ|VM_WRITE|VM_EXEC)];
		dirty_accountable = 1;
	}

	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

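/*
 * mprotect(2): change the protection of [start, start+len) to prot.  The
 * range must be fully covered by vmas with no holes; the loop below walks
 * it vma by vma, checks the VM_MAY* bits and the security hook, and applies
 * the change with mprotect_fixup().
 */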
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/*
		 * newflags >> 4 shifts the VM_MAY* bits into the VM_* bit
		 * positions, so this rejects any requested permission that
		 * the vma does not allow.
		 */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
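
/*
 * Illustrative userspace sketch, not part of this file: one typical way the
 * syscall above is exercised.  The single-page buffer and the bare error
 * handling are assumptions made only for the example.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		strcpy(buf, "hello");			// writable while PROT_WRITE is set
 *		if (mprotect(buf, page, PROT_READ))	// enters sys_mprotect() above
 *			return 1;
 *		printf("%s\n", buf);			// reads still work
 *		// *buf = 'x';				// would now fault with SIGSEGV
 *		munmap(buf, page);
 *		return 0;
 *	}
 */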