/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

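/*
 * Fallback for architectures that do not provide pgprot_modify(): simply
 * adopt the new protection bits.  An architecture may override this to
 * preserve bits from the old protection (memory-type bits, for example)
 * across a protection change.
 */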
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

/*
 * For a prot_numa update we only hold mmap_sem for read, so there is a
 * potential race with faulting where a pmd was temporarily none.  This
 * function checks for a transhuge pmd under the appropriate lock.  It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

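/*
 * Walk the ptes in [addr, end) under the page-table lock.  For a normal
 * protection change, rewrite each present pte with newprot; for a
 * prot_numa update, mark resident, non-KSM pages as pte_numa so the next
 * access takes a NUMA hinting fault.  Returns the number of ptes updated.
 */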
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				if (pte_numa(ptent))
					ptent = pte_mknonnuma(ptent);
				ptent = pte_modify(ptent, newprot);
				/*
				 * Avoid taking write faults for pages we
				 * know to be dirty.
				 */
				if (dirty_accountable && pte_dirty(ptent))
					ptent = pte_mkwrite(ptent);
				ptep_modify_prot_commit(mm, addr, pte, ptent);
				updated = true;
			} else {
				struct page *page;

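				/*
				 * Only normal, non-KSM pages are marked for
				 * NUMA hinting: KSM pages may be shared by
				 * many unrelated processes, so migrating
				 * them for one task's locality is not
				 * worthwhile.
				 */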
				page = vm_normal_page(vma, addr, oldpte);
				if (page && !PageKsm(page)) {
					if (!pte_numa(oldpte)) {
						ptep_set_numa(mm, addr, pte);
						updated = true;
					}
				}
			}
			if (updated)
				pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

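			/*
			 * A non-present, non-file pte is a swap entry.  Only
			 * writable migration entries need fixing here: plain
			 * swap entries carry no protection bits, since
			 * protections are rebuilt from the VMA at swap-in.
			 */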
			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

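/*
 * Walk the pmds in [addr, end).  Transparent huge pmds are either updated
 * in place by change_huge_pmd() or split and handled as ordinary ptes;
 * everything else falls through to change_pte_range().  Returns the number
 * of entries updated.
 */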
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

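/*
 * Walk the puds in [addr, end) and apply the protection change to each
 * populated pmd range underneath.
 */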
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

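/*
 * Top-level page-table walk for a protection change.  The TLB flush is
 * deferred until the whole range has been updated; set_tlb_flush_pending()
 * marks the window in which other code (such as the NUMA hinting fault
 * path) must assume that stale entries for this mm may still be live in
 * remote TLBs.
 */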
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

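/*
 * change_protection() - change the protection of every page in
 * [start, end) within @vma to @newprot.  hugetlb VMAs take a dedicated
 * path; everything else goes through the generic page-table walk.  MMU
 * notifiers are told about the range so secondary MMUs (e.g. KVM) can
 * invalidate their mappings.  Returns the number of entries updated.
 */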
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

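/*
 * Apply @newflags to the range [start, end), which must lie within @vma.
 * Accounts for any newly writable private memory, merges or splits VMAs
 * as needed, then rewrites vm_page_prot and the page tables.  Called with
 * mmap_sem held for write.
 */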
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.  hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

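	/*
	 * Shared mappings that want write notification are left read-only
	 * in vm_page_prot so the first write to each page faults; ptes
	 * already known dirty are made writable in change_pte_range() to
	 * avoid pointless faults.
	 */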
	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

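/*
 * mprotect(2): change the protection of [start, start + len).  start must
 * be page aligned; len is rounded up to a page boundary.  PROT_GROWSDOWN
 * and PROT_GROWSUP extend the change to the growable end of a stack VMA.
 */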
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts the VM_MAY* bits into the VM_* positions */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}