/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;

	/*
	 * Can be called with only the mmap_sem for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_sem is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}

			if (is_write_device_private_entry(entry)) {
				pte_t newpte;

				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				make_device_private_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

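/*
 * Note (editor's sketch, not in the original source): change_protection_range()
 * below drives the full five-level walk, pgd -> p4d -> pud -> pmd -> pte,
 * through the helpers above. On architectures configured with fewer paging
 * levels the intermediate levels are folded away by the asm-generic
 * pgtable-nop4d/nopud/nopmd headers, so the corresponding change_*_range()
 * loops reduce to a single iteration.
 */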
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

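	/*
	 * Note (editor's sketch, not in the original source): the two calls
	 * below rebalance the per-class VM counters (data_vm, exec_vm,
	 * stack_vm and total_vm): the pages are subtracted under the old
	 * flags and re-added under the new ones, so total_vm is unchanged
	 * and only the classification moves.
	 */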
	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					ARCH_VM_PKEY_FLAGS;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/*
		 * newflags >> 4 shifts the VM_MAY% bits in place of VM_%:
		 * VM_MAYREAD/VM_MAYWRITE/VM_MAYEXEC sit four bits above
		 * VM_READ/VM_WRITE/VM_EXEC, so a permission requested in
		 * newflags without the matching VM_MAY% bit survives the
		 * mask below and is rejected with -EACCES.
		 */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */
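/*
 * Example (editor's sketch, userspace, not part of this kernel file): a
 * minimal illustration of how the syscalls defined above are typically
 * driven. It assumes a pkeys-capable kernel and kernel headers that define
 * the SYS_pkey_* numbers, and goes through syscall(2) directly since libc
 * wrappers for pkey_alloc()/pkey_mprotect() may not be available:
 *
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	int pkey = syscall(SYS_pkey_alloc, 0, 0);
 *	if (pkey >= 0) {
 *		syscall(SYS_pkey_mprotect, buf, 4096, PROT_READ, pkey);
 *		syscall(SYS_pkey_free, pkey);
 *	}
 *	mprotect(buf, 4096, PROT_READ);
 *
 * The final mprotect() call takes the legacy path, i.e. do_mprotect_pkey()
 * with pkey == -1.
 */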