/*
 * mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
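 *
 * On success the returned pte is mapped and locked; the caller must
 * release it with pte_unmap_unlock() once the update is complete.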
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, int prot_numa, spinlock_t **ptl)
{
        pte_t *pte;
        spinlock_t *pmdl;

        /* !prot_numa is protected by mmap_sem held for write */
        if (!prot_numa)
                return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

        pmdl = pmd_lock(vma->vm_mm, pmd);
        if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
                spin_unlock(pmdl);
                return NULL;
        }

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
        spin_unlock(pmdl);
        return pte;
}

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
        unsigned long pages = 0;

        pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
        if (!pte)
                return 0;

        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);

                        /* Avoid taking write faults for known dirty pages */
                        if (dirty_accountable && pte_dirty(ptent) &&
                                        (pte_soft_dirty(ptent) ||
                                         !(vma->vm_flags & VM_SOFTDIRTY))) {
                                ptent = pte_mkwrite(ptent);
                        }
                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                        pages++;
                } else if (IS_ENABLED(CONFIG_MIGRATION)) {
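                        /*
                         * The pte is not present: it may encode a swap or
                         * migration entry. Only write migration entries
                         * need adjusting here.
                         */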
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                newpte = swp_entry_to_pte(entry);
                                if (pte_swp_soft_dirty(oldpte))
                                        newpte = pte_swp_mksoft_dirty(newpte);
                                set_pte_at(mm, addr, pte, newpte);

                                pages++;
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);

        return pages;
}

static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                pud_t *pud, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pmd_t *pmd;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long next;
        unsigned long pages = 0;
        unsigned long nr_huge_updates = 0;
        unsigned long mni_start = 0;

        pmd = pmd_offset(pud, addr);
        do {
                unsigned long this_pages;

                next = pmd_addr_end(addr, end);
                if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
                        continue;

                /* invoke the mmu notifier if the pmd is populated */
                if (!mni_start) {
                        mni_start = addr;
                        mmu_notifier_invalidate_range_start(mm, mni_start, end);
                }

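                /*
                 * A huge pmd that only partially overlaps the range must be
                 * split first; a fully covered one can be changed in place.
                 */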
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma, addr, pmd);
                        else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
                                                newprot, prot_numa);

                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
                                                pages += HPAGE_PMD_NR;
                                                nr_huge_updates++;
                                        }

                                        /* huge pmd was handled */
                                        continue;
                                }
                        }
                        /* fall through, the trans huge pmd just split */
                }
                this_pages = change_pte_range(vma, pmd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
                pages += this_pages;
        } while (pmd++, addr = next, addr != end);

        if (mni_start)
                mmu_notifier_invalidate_range_end(mm, mni_start, end);

        if (nr_huge_updates)
                count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
                pgd_t *pgd, unsigned long addr, unsigned long end,
                pgprot_t newprot, int dirty_accountable, int prot_numa)
{
        pud_t *pud;
        unsigned long next;
        unsigned long pages = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pages += change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pud++, addr = next, addr != end);

        return pages;
}

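/*
 * Walk the page tables for [addr, end) and apply newprot, returning the
 * number of pages whose protection was actually updated.
 */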
static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
        unsigned long pages = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        set_tlb_flush_pending(mm);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pages += change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);

        /* Only flush the TLB if we actually modified any entries: */
        if (pages)
                flush_tlb_range(vma, start, end);
        clear_tlb_flush_pending(mm);

        return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, pgprot_t newprot,
                       int dirty_accountable, int prot_numa)
{
        unsigned long pages;

        if (is_vm_hugetlb_page(vma))
                pages = hugetlb_change_protection(vma, start, end, newprot);
        else
                pages = change_protection_range(vma, start, end, newprot,
                                dirty_accountable, prot_numa);

        return pages;
}

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again. hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory_mm(mm, charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
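         * A merge can only succeed when the neighbouring vmas already
         * carry the flags we are about to set and are otherwise
         * compatible, in which case no split is needed.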
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        dirty_accountable = vma_wants_writenotify(vma);
        vma_set_page_prot(vma);

        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);

        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC:
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

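        /*
         * mmap_sem is taken for write: mprotect_fixup() below may split,
         * merge and re-flag vmas.
         */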
        down_write(&current->mm->mmap_sem);

        vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
        prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags;
                newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shifts VM_MAY% in place of VM_% */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}