// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
#include <linux/memory-tiers.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "internal.h"

bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable. */
	if (pte_protnone(pte))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_pte_wp(vma, pte))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/*
		 * Writable MAP_PRIVATE mapping: We can only special-case on
		 * exclusive anonymous pages, because we know that our
		 * write-fault handler similarly would map them writable without
		 * any additional checks while holding the PT lock.
		 */
		page = vm_normal_page(vma, addr, pte);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/*
	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
	 * needs a real write-fault for writenotify
	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
	 * FS was already notified and we can simply mark the PTE writable
	 * just like the write-fault handler would do.
	 */
	return pte_dirty(pte);
}

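/*
 * Illustrative caller-side sketch (an assumption, not code from this
 * file): a fault-avoidance path would upgrade a read-only PTE roughly
 * like change_pte_range() below does under MM_CP_TRY_CHANGE_WRITABLE:
 *
 *	if (!pte_write(pte) && can_change_pte_writable(vma, addr, pte))
 *		pte = pte_mkwrite(pte, vma);
 */

/*
 * Change protections over one PTE table.  Returns the number of PTEs
 * updated, or -EAGAIN if the PTE table could not be mapped (e.g. it was
 * freed under us); the caller retries the whole pmd in that case.
 */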
static long change_pte_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	tlb_change_page_size(tlb, PAGE_SIZE);
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = ptep_get(pte);
		if (pte_present(oldpte)) {
			pte_t ptent;

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;
				int nid;
				bool toptier;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || is_zone_device_page(page) || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_count(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				nid = page_to_nid(page);
				if (target_node == nid)
					continue;
				toptier = node_is_toptier(nid);

				/*
				 * Skip scanning top tier node if normal numa
				 * balancing is disabled
				 */
				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
				    toptier)
					continue;
				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
				    !toptier)
					xchg_page_access_time(page,
						jiffies_to_msecs(jiffies));
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);

			if (uffd_wp)
				ptent = pte_mkuffd_wp(ptent);
			else if (uffd_wp_resolve)
				ptent = pte_clear_uffd_wp(ptent);

			/*
			 * In some writable, shared mappings, we might want
			 * to catch actual write access -- see
			 * vma_wants_writenotify().
			 *
			 * In all writable, private mappings, we have to
			 * properly handle COW.
			 *
			 * In both cases, we can sometimes still change PTEs
			 * writable and avoid the write-fault handler, for
			 * example, if a PTE is already dirty and no other
			 * COW or special handling is required.
			 */
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent) &&
			    can_change_pte_writable(vma, addr, ptent))
				ptent = pte_mkwrite(ptent, vma);

			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			if (pte_needs_flush(oldpte, ptent))
				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				struct page *page = pfn_swap_entry_to_page(entry);

				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
							     swp_offset(entry));
				else
					entry = make_readable_migration_entry(swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_nonpresent_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_pte_marker_entry(entry)) {
				/*
				 * Ignore error swap entries unconditionally,
				 * because any access should sigbus anyway.
				 */
				if (is_poisoned_swp_entry(entry))
					continue;
				/*
				 * If this is uffd-wp pte marker and we'd like
				 * to unprotect it, drop it; the next page
				 * fault will trigger without uffd trapping.
				 */
				if (uffd_wp_resolve) {
					pte_clear(vma->vm_mm, addr, pte);
					pages++;
				}
				continue;
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		} else {
			/* It must be a none pte; what else could it be? */
			WARN_ON_ONCE(!pte_none(oldpte));

			/*
			 * Nobody plays with any none ptes besides
			 * userfaultfd when applying the protections.
			 */
			if (likely(!uffd_wp))
				continue;

			if (userfaultfd_wp_use_markers(vma)) {
				/*
				 * For file-backed mem, we need to be able to
				 * wr-protect a none pte, because even if the
				 * pte is none, the page/swap cache could
				 * exist.  We do that by installing a marker.
				 */
				set_pte_at(vma->vm_mm, addr, pte,
					   make_pte_marker(PTE_MARKER_UFFD_WP));
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Return true if we want to split THPs into PTE mappings in the change
 * protection procedure, false otherwise.
 */
static inline bool
pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
	/*
	 * pte markers only reside at the pte level; if we need pte markers,
	 * we need to split.  We cannot wr-protect a shmem thp because file
	 * thp is handled differently when split by erasing the pmd so far.
	 */
	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}

/*
 * Return true if we want to populate pgtables in the change protection
 * procedure, false otherwise.
 */
static inline bool
pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
	/* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */
	if (!(cp_flags & MM_CP_UFFD_WP))
		return false;

	/* Populate if the userfaultfd mode requires pte markers */
	return userfaultfd_wp_use_markers(vma);
}

/*
 * Populate the pgtable underneath for whatever reason if requested.
 * When {pte|pmd|...}_alloc() fails, we treat it the same way as a pgtable
 * allocation failure during a page fault: kick the OOM killer and return
 * an error.
 */
#define change_pmd_prepare(vma, pmd, cp_flags)				\
	({								\
		long err = 0;						\
		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
			if (pte_alloc(vma->vm_mm, pmd))			\
				err = -ENOMEM;				\
		}							\
		err;							\
	})

/*
 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need a
 * separate change_pmd_prepare() because pte_alloc() returns 0 on success,
 * while {pmd|pud|p4d}_alloc() returns a valid pointer on success.
 */
#define change_prepare(vma, high, low, addr, cp_flags)			\
	({								\
		long err = 0;						\
		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
			if (p == NULL)					\
				err = -ENOMEM;				\
		}							\
		err;							\
	})
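
/*
 * Illustrative expansion (a sketch based on the macro above): at the pud
 * level, change_prepare(vma, pud, pmd, addr, cp_flags) evaluates to 0
 * unless pte markers are required and pmd_alloc(vma->vm_mm, pud, addr)
 * fails, in which case it evaluates to -ENOMEM.
 */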

static inline long change_pmd_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		long ret;
		pmd_t _pmd;
again:
		next = pmd_addr_end(addr, end);

		ret = change_pmd_prepare(vma, pmd, cp_flags);
		if (ret) {
			pages = ret;
			break;
		}

		if (pmd_none(*pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		_pmd = pmdp_get_lockless(pmd);
		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
			if ((next - addr != HPAGE_PMD_SIZE) ||
			    pgtable_split_needed(vma, cp_flags)) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
				/*
				 * For file-backed, the pmd could have been
				 * cleared; make sure pmd populated if
				 * necessary, then fall-through to pte level.
				 */
				ret = change_pmd_prepare(vma, pmd, cp_flags);
				if (ret) {
					pages = ret;
					break;
				}
			} else {
				ret = change_huge_pmd(tlb, vma, pmd,
						addr, newprot, cp_flags);
				if (ret) {
					if (ret == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}

		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
				       cp_flags);
		if (ret < 0)
			goto again;	/* the PTE table was freed under us: retry */
		pages += ret;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline long change_pud_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	long pages = 0, ret;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		ret = change_prepare(vma, pud, pmd, addr, cp_flags);
		if (ret)
			return ret;
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline long change_p4d_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	long pages = 0, ret;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		ret = change_prepare(vma, p4d, pud, addr, cp_flags);
		if (ret)
			return ret;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

static long change_protection_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	long pages = 0, ret;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	tlb_start_vma(tlb, vma);
	do {
		next = pgd_addr_end(addr, end);
		ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
		if (ret) {
			pages = ret;
			break;
		}
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	tlb_end_vma(tlb, vma);

	return pages;
}

long change_protection(struct mmu_gather *tlb,
		       struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, unsigned long cp_flags)
{
	pgprot_t newprot = vma->vm_page_prot;
	long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

#ifdef CONFIG_NUMA_BALANCING
	/*
	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
	 * are expected to reflect their requirements via VMA flags such that
	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
	 */
	if (cp_flags & MM_CP_PROT_NUMA)
		newprot = PAGE_NONE;
#else
	WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA);
#endif

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot,
						  cp_flags);
	else
		pages = change_protection_range(tlb, vma, start, end, newprot,
						cp_flags);

	return pages;
}
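
/*
 * Typical caller pattern (see do_mprotect_pkey() below): protection
 * changes are driven under one mmu_gather so TLB flushes can be batched
 * across the whole range:
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	change_protection(&tlb, vma, start, end, cp_flags);
 *	tlb_finish_mmu(&tlb);
 */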

/*
 * Walk used by mprotect_fixup() when the architecture restricts which
 * PFNs may be mapped with a given protection (arch_has_pfn_modify_check()):
 * every PFN in the range must pass pfn_modify_allowed() before the new
 * flags are committed.
 */
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
				  *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
				  *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
	.walk_lock		= PGWALK_WRLOCK,
};

int
mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
	       struct vm_area_struct *vma, struct vm_area_struct **pprev,
	       unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned int mm_cp_flags = 0;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

60842e4089cSAndi Kleen /*
6091da177e4SLinus Torvalds * If we make a private mapping writable we increase our commit;
6101da177e4SLinus Torvalds * but (without finer accounting) cannot reduce our commit if we
6115a6fe125SMel Gorman * make it unwritable again. hugetlb mapping were accounted for
6125a6fe125SMel Gorman * even if read-only so there is no need to account for them here
6131da177e4SLinus Torvalds */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(vmi, mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(vmi, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(vmi, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma_start_write(vma);
	vm_flags_reset(vma, newflags);
	if (vma_wants_manual_pte_write_upgrade(vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	vma_set_page_prot(vma);

	change_protection(tlb, vma, start, end, mm_cp_flags);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey == -1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);
	struct mmu_gather tlb;
	struct vma_iterator vmi;

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma_iter_init(&vmi, current->mm, start);
	vma = vma_find(&vmi, end);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	tlb_gather_mmu(&tlb, current->mm);
	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		if (vma->vm_start != tmp) {
			error = -ENOMEM;
			break;
		}

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			break;
		}

		if (map_deny_write_exec(vma->vm_flags, newflags)) {
			error = -EACCES;
			break;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			break;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			break;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				break;
		}

		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
		if (error)
			break;

		tmp = vma_iter_end(&vmi);
		nstart = tmp;
		prot = reqprot;
	}
	tlb_finish_mmu(&tlb);

	if (!error && tmp < end)
		error = -ENOMEM;

out:
	mmap_write_unlock(current->mm);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}
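
/*
 * Illustrative userspace usage (a sketch, not part of this file): drop
 * write permission on an anonymous mapping; subsequent writes fault:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(p, 4096, PROT_READ);
 */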

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}
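
/*
 * Illustrative userspace flow (a sketch; glibc exposes these syscalls as
 * pkey_alloc()/pkey_mprotect()/pkey_free()):
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
 *	...
 *	pkey_free(pkey);
 */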

#endif /* CONFIG_ARCH_HAS_PKEYS */