// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
#include <linux/memory-tiers.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "internal.h"

static inline bool can_change_pte_writable(struct vm_area_struct *vma,
					   unsigned long addr, pte_t pte)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable. */
	if (pte_protnone(pte))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_pte_wp(vma, pte))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/*
		 * Writable MAP_PRIVATE mapping: We can only special-case on
		 * exclusive anonymous pages, because we know that our
		 * write-fault handler similarly would map them writable without
		 * any additional checks while holding the PT lock.
		 */
		page = vm_normal_page(vma, addr, pte);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/*
	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS
	 * still needs a real write-fault for writenotify (see
	 * vma_wants_writenotify()). If "dirty", the assumption is that the FS
	 * was already notified and we can simply mark the PTE writable, just
	 * like the write-fault handler would do.
	 */
	return pte_dirty(pte);
}

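/*
 * Walk one pte table and update the protection of every entry in
 * [addr, end): present ptes, swap-type entries and uffd-wp markers alike.
 * Returns the number of entries that were actually changed.
 */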
static unsigned long change_pte_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	tlb_change_page_size(tlb, PAGE_SIZE);

	/*
	 * Can be called with only the mmap_lock held for reading by
	 * prot_numa, so we must check that the pmd isn't constantly changing
	 * from under us, from pmd_none to pmd_trans_huge and/or the other way
	 * around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte, so the pmd can't change from under
	 * us even if the mmap_lock is only held for reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;
				int nid;
				bool toptier;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || is_zone_device_page(page) || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_count(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				nid = page_to_nid(page);
				if (target_node == nid)
					continue;
				toptier = node_is_toptier(nid);

				/*
				 * Skip scanning top tier node if normal numa
				 * balancing is disabled
				 */
				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
				    toptier)
					continue;
				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
				    !toptier)
					xchg_page_access_time(page,
						jiffies_to_msecs(jiffies));
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			if (uffd_wp) {
				ptent = pte_wrprotect(ptent);
				ptent = pte_mkuffd_wp(ptent);
			} else if (uffd_wp_resolve) {
				ptent = pte_clear_uffd_wp(ptent);
			}

			/*
			 * In some writable, shared mappings, we might want
			 * to catch actual write access -- see
			 * vma_wants_writenotify().
			 *
			 * In all writable, private mappings, we have to
			 * properly handle COW.
			 *
			 * In both cases, we can sometimes still change PTEs
			 * writable and avoid the write-fault handler, for
			 * example, if a PTE is already dirty and no other
			 * COW or special handling is required.
			 */
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent) &&
			    can_change_pte_writable(vma, addr, ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			if (pte_needs_flush(oldpte, ptent))
				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

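			/*
			 * Non-present entries carry write permission too:
			 * writable migration, device-private and
			 * device-exclusive entries are downgraded to their
			 * read-only variants below.
			 */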
			if (is_writable_migration_entry(entry)) {
				struct page *page = pfn_swap_entry_to_page(entry);

				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
							     swp_offset(entry));
				else
					entry = make_readable_migration_entry(swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (pte_marker_entry_uffd_wp(entry)) {
				/*
				 * If this is an uffd-wp pte marker and we'd
				 * like to unprotect it, drop it; the next
				 * page fault will trigger without uffd
				 * trapping.
				 */
				if (uffd_wp_resolve) {
					pte_clear(vma->vm_mm, addr, pte);
					pages++;
				}
				continue;
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		} else {
			/* It must be a none pte; what else could it be? */
			WARN_ON_ONCE(!pte_none(oldpte));
			if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
				/*
				 * For file-backed mem, we need to be able to
				 * wr-protect a none pte, because even if the
				 * pte is none, the page/swap cache could
				 * exist. We do that by installing a marker.
				 */
				set_pte_at(vma->vm_mm, addr, pte,
					   make_pte_marker(PTE_MARKER_UFFD_WP));
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}

/* Return true if we're uffd wr-protecting file-backed memory, or false */
static inline bool
uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags)
{
	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}

/*
 * If wr-protecting the range for file-backed, populate pgtable for the case
 * when pgtable is empty but page cache exists. If {pte|pmd|...}_alloc()
 * fails it means we are out of memory, and we have no better option but to
 * stop.
 */
#define change_pmd_prepare(vma, pmd, cp_flags)				\
	do {								\
		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
			if (WARN_ON_ONCE(pte_alloc(vma->vm_mm, pmd)))	\
				break;					\
		}							\
	} while (0)
/*
 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to
 * have a separate change_pmd_prepare() because pte_alloc() returns 0 on
 * success, while {pmd|pud|p4d}_alloc() returns the valid pointer on success.
 */
#define change_prepare(vma, high, low, addr, cp_flags)			\
	do {								\
		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
			if (WARN_ON_ONCE(p == NULL))			\
				break;					\
		}							\
	} while (0)

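/*
 * For illustration, change_prepare(vma, pud, pmd, addr, cp_flags) expands to
 * roughly:
 *
 *	if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {
 *		pmd_t *p = pmd_alloc(vma->vm_mm, pud, addr);
 *		if (WARN_ON_ONCE(p == NULL))
 *			break;
 *	}
 *
 * i.e. it allocates the next-lower table level so that uffd-wp markers can
 * be installed even where no page table existed before.
 */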
static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		change_pmd_prepare(vma, pmd, cp_flags);
		/*
		 * Automatic NUMA balancing walks the tables with mmap_lock
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check, leading to a false positive and clearing. Hence,
		 * it's necessary to atomically read the PMD value for all the
		 * checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		    pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
						MMU_NOTIFY_PROTECTION_VMA, 0,
						vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

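		/*
		 * Huge pmds are either split to pte level (and then handled
		 * by change_pte_range() below) or changed in one shot by
		 * change_huge_pmd().
		 */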
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if ((next - addr != HPAGE_PMD_SIZE) ||
			    uffd_wp_protect_file(vma, cp_flags)) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
				/*
				 * For file-backed, the pmd could have been
				 * cleared; make sure pmd populated if
				 * necessary, then fall-through to pte level.
				 */
				change_pmd_prepare(vma, pmd, cp_flags);
			} else {
				/*
				 * change_huge_pmd() does not defer TLB flushes,
				 * so no need to propagate the tlb argument.
				 */
				int nr_ptes = change_huge_pmd(tlb, vma, pmd,
						addr, newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(tlb, vma, pmd, addr, next,
					      newprot, cp_flags);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

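/*
 * The pud and p4d walkers below mirror change_pmd_range(): descend one
 * level, pre-allocating the lower table via change_prepare() where uffd-wp
 * markers may need to be installed, and accumulate the count of updated
 * entries.
 */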
static inline unsigned long change_pud_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		change_prepare(vma, pud, pmd, addr, cp_flags);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		change_prepare(vma, p4d, pud, addr, cp_flags);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

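/*
 * Top of the page-table walk: iterate over the pgd entries covering
 * [addr, end) and push the new protection down to the pte level.
 */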
static unsigned long change_protection_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	tlb_start_vma(tlb, vma);
	do {
		next = pgd_addr_end(addr, end);
		change_prepare(vma, pgd, p4d, addr, cp_flags);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	tlb_end_vma(tlb, vma);

	return pages;
}

unsigned long change_protection(struct mmu_gather *tlb,
		       struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       unsigned long cp_flags)
{
	unsigned long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot,
						  cp_flags);
	else
		pages = change_protection_range(tlb, vma, start, end, newprot,
						cp_flags);

	return pages;
}

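/*
 * Page-walk callbacks used by mprotect_fixup() when a VM_PFNMAP/VM_MIXEDMAP
 * range is being reduced to PROT_NONE: they check, via pfn_modify_allowed(),
 * that every PFN may be mapped with the new protection before any state is
 * actually changed.
 */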
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};

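/*
 * Apply @newflags to the VMA range [start, end), merging or splitting VMAs
 * as needed, updating accounting, and rewriting the page-table protections.
 * *pprev is set to the VMA that now covers @start.
 */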
int
mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
	       struct vm_area_struct **pprev, unsigned long start,
	       unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	bool try_change_writable;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
		    may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
				  VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		try_change_writable = vma_wants_writenotify(vma, vma->vm_page_prot);
	else
		try_change_writable = !!(vma->vm_flags & VM_WRITE);
	vma_set_page_prot(vma);

	change_protection(tlb, vma, start, end, vma->vm_page_prot,
			  try_change_writable ? MM_CP_TRY_CHANGE_WRITABLE : 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
			    unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);
	struct mmu_gather tlb;
	MA_STATE(mas, &current->mm->mm_mt, 0, 0);

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	mas_set(&mas, start);
	vma = mas_find(&mas, ULONG_MAX);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	if (start > vma->vm_start)
		prev = vma;
	else
		prev = mas_prev(&mas, 0);

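	/*
	 * Walk the VMAs covering [start, end) and apply the new protection
	 * one VMA at a time; any gap in the range aborts with -ENOMEM.
	 */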
	tlb_gather_mmu(&tlb, current->mm);
	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			break;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			break;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			break;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				break;
		}

		error = mprotect_fixup(&tlb, vma, &prev, nstart, tmp, newflags);
		if (error)
			break;

		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

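		/* Move on to the next VMA; a hole in the range is an error. */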
		vma = find_vma(current->mm, prev->vm_end);
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
		prot = reqprot;
	}
	tlb_finish_mmu(&tlb);
out:
	mmap_write_unlock(current->mm);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */