// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

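/*
 * Rewrite the protection of the ptes mapped by @pmd in [addr, end) to
 * @newprot, honouring the MM_CP_* modifiers in @cp_flags. Returns the
 * number of ptes actually updated, so callers can skip the TLB flush
 * when nothing changed.
 */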
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * Can be called with only the mmap_sem for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_sem is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_mapcount(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

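			/*
			 * The modify_prot start/commit pair lets the
			 * architecture update the pte without losing
			 * concurrent hardware access/dirty bit updates;
			 * the generic version clears the pte while it is
			 * being rewritten.
			 */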
			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			if (uffd_wp) {
				ptent = pte_wrprotect(ptent);
				ptent = pte_mkuffd_wp(ptent);
			} else if (uffd_wp_resolve) {
				/*
				 * Leave the write bit to be handled by the
				 * page fault handler, so that things like
				 * COW can be handled properly.
				 */
				ptent = pte_clear_uffd_wp(ptent);
			}

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_write_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				make_device_private_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}

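/*
 * Walk the pmds under @pud: huge pmds covering the whole range are
 * changed in one go (or split when only partially covered), anything
 * else descends into change_pte_range(). The mmu notifier range is
 * initialized lazily, once the first populated pmd is seen.
 */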
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		/*
		 * Automatic NUMA balancing walks the tables with mmap_sem
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check leading to a false positive and clearing.
		 * Hence, it's necessary to atomically read the PMD value
		 * for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

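		/*
		 * A huge pmd only partially covered by [addr, next) must
		 * be split first; otherwise try to change its protection
		 * in a single operation.
		 */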
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
							      newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
					      cp_flags);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

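/*
 * Top level of the page-table walk for change_protection() below:
 * iterate the pgd entries covering [addr, end) and flush the TLB once
 * at the end, and only if any entries were actually modified.
 */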
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       unsigned long cp_flags)
{
	unsigned long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						cp_flags);

	return pages;
}

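/*
 * Page-walk callbacks used by mprotect_fixup() on architectures with
 * arch_has_pfn_modify_check() to verify that every pfn in a
 * VM_PFNMAP/VM_MIXEDMAP mapping may be switched to a PROT_NONE-style
 * protection.
 */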
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};

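/*
 * Apply @newflags to [start, end) of @vma: charge newly writable
 * private mappings against the commit limit, merge or split VMAs as
 * needed, then update the page tables via change_protection().
 * *@pprev returns the VMA that covers the range just changed.
 */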
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

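/*
 * Illustrative only: a userspace call such as
 *
 *	mprotect(addr, len, PROT_READ);
 *
 * reaches this helper through the mprotect() syscall below with
 * pkey == -1, while pkey_mprotect() passes a previously allocated
 * protection key.
 */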
/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

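	/*
	 * Find the first affected VMA; PROT_GROWSDOWN/PROT_GROWSUP widen
	 * the range toward the growable end of a stack mapping.
	 */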
	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

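	/*
	 * Walk the VMAs covering [start, end), fixing up each span in
	 * turn; mprotect_fixup() may merge or split VMAs along the way.
	 */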
	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

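/*
 * Illustrative only, assuming an architecture with protection-key
 * support: the expected userspace sequence is roughly
 *
 *	pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *	...
 *	pkey_free(pkey);
 */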
SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */