// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * Can be called with only the mmap_lock for reading by
	 * prot_numa, so we must check that the pmd isn't constantly
	 * changing from under us, from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte, so the pmd can't change
	 * from under us even if the mmap_lock is only held for
	 * reading.
	 */
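	/* Every entry below is inspected and rewritten under this PTE lock. */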
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;
				int nid;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_count(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				nid = page_to_nid(page);
				if (target_node == nid)
					continue;

				/*
				 * Skip scanning the top tier node if normal numa
				 * balancing is disabled
				 */
				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
				    node_is_toptier(nid))
					continue;
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			if (uffd_wp) {
				ptent = pte_wrprotect(ptent);
				ptent = pte_mkuffd_wp(ptent);
			} else if (uffd_wp_resolve) {
				/*
				 * Leave the write bit to be handled by the
				 * page fault handler, so that things like
				 * COW can be handled properly.
				 */
				ptent = pte_clear_uffd_wp(ptent);
			}

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				entry = make_readable_migration_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}

static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		/*
		 * Automatic NUMA balancing walks the tables with mmap_lock
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check, leading to a false positive and clearing.
		 * Hence, it's necessary to atomically read the PMD value
		 * for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
							      newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
					      cp_flags);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

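/*
 * Walk the page tables from the top level down, applying @newprot to
 * every present leaf entry in [addr, end). Returns the number of
 * entries changed; the TLB is only flushed when that count is non-zero.
 */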
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       unsigned long cp_flags)
{
	unsigned long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						cp_flags);

	return pages;
}

static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so it doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

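	/* Update the per-mm VM statistics for the old and new flag sets. */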
	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	if (start > vma->vm_start)
		prev = vma;
	else
		prev = vma->vm_prev;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC? */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			goto out;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				goto out;
		}

		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;

		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	mmap_write_unlock(current->mm);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */