/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  include/linux/userfaultfd_k.h
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;

/* Mutually exclusive modes of operation. */
enum mfill_atomic_mode {
	MFILL_ATOMIC_COPY,
	MFILL_ATOMIC_ZEROPAGE,
	MFILL_ATOMIC_CONTINUE,
	MFILL_ATOMIC_POISON,
	NR_MFILL_ATOMIC_MODES,
};

#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))
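
/*
 * Layout note (illustrative, derived from the definitions above): with the
 * four modes currently defined, NR_MFILL_ATOMIC_MODES - 1 == 3, so
 * MFILL_ATOMIC_MODE_BITS == 2.  The mode therefore lives in bits 0-1 of a
 * uffd_flags_t, while mode-independent behavior flags (such as
 * MFILL_ATOMIC_WP below) start at bit 2, i.e. MFILL_ATOMIC_BIT(0) == BIT(2).
 */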

static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
{
	return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
}

static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
{
	flags &= ~MFILL_ATOMIC_MODE_MASK;
	return flags | ((__force uffd_flags_t) mode);
}

/* Flags controlling behavior. These behavior changes are mode-independent. */
#define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)
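
/*
 * Minimal usage sketch (illustrative only, not taken from any particular
 * caller): pick one mode, then OR in behavior flags:
 *
 *	uffd_flags_t flags = uffd_flags_set_mode(0, MFILL_ATOMIC_COPY);
 *	if (wp)			/- "wp" is a hypothetical local -/
 *		flags |= MFILL_ATOMIC_WP;
 *
 * After this, uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY) is true and the
 * WP behavior bit is set independently of the mode.
 */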

extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, uffd_flags_t flags);

extern ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
				 unsigned long src_start, unsigned long len,
				 atomic_t *mmap_changing, uffd_flags_t flags);
extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
				     unsigned long dst_start,
				     unsigned long len,
				     atomic_t *mmap_changing);
extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
				     unsigned long len, atomic_t *mmap_changing,
				     uffd_flags_t flags);
extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
				   unsigned long len, atomic_t *mmap_changing,
				   uffd_flags_t flags);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);
extern long uffd_wp_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long len, bool enable_wp);
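
/*
 * Illustrative sketch of a caller (hypothetical; the real ioctl handlers
 * live in fs/userfaultfd.c and may differ): resolve a missing-page fault by
 * copying one page into the registered range, optionally write-protected:
 *
 *	uffd_flags_t flags = wp ? MFILL_ATOMIC_WP : 0;
 *	ssize_t ret = mfill_atomic_copy(dst_mm, dst_addr, src_addr, PAGE_SIZE,
 *					&mmap_changing, flags);
 *
 * "wp", "dst_mm", "dst_addr", "src_addr" and "mmap_changing" are assumed
 * locals of the caller; a negative return value indicates an error.
 */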

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on certain uffd-registered VMAs:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds. (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* set up the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

/*
 * Don't do fault-around for either WP or MINOR registered uffd ranges.  For
 * a MINOR registered range, fault-around would be a total disaster: ptes
 * could be installed without notifications.  For WP it should mostly be fine
 * as long as the fault-around code checks for pte_none() before the
 * installation; however, to be super safe we just forbid it.
 */
static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

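/*
 * Note that userfaultfd_pte_wp() and userfaultfd_huge_pmd_wp() below require
 * both conditions: the VMA must be registered in uffd-wp mode and the
 * individual pte/pmd must carry the uffd-wp bit; a set pte/pmd bit alone is
 * not sufficient.
 */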
static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

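/*
 * Can a VMA of this type be registered with the requested @vm_flags?  Only
 * anonymous, hugetlbfs and shmem VMAs qualify at all; MINOR mode is further
 * restricted to hugetlbfs and shmem, and without CONFIG_PTE_MARKER_UFFD_WP
 * uffd-wp is further restricted to anonymous memory.
 */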
static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags)
{
	if ((vm_flags & VM_UFFD_MINOR) &&
	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
		return false;
#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * If the user requested uffd-wp but pte markers are not enabled for
	 * uffd-wp, then only anonymous memory is supported; shmem &
	 * hugetlbfs are not.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	    vma_is_shmem(vma);
}

extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
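/*
 * With CONFIG_USERFAULTFD disabled, the helpers below become trivial stubs
 * (handle_userfault() returns VM_FAULT_SIGBUS, the registration predicates
 * return false), so callers in core mm code don't need #ifdefs.
 */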
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
				unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_USERFAULTFD */

static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
{
	/* Only wr-protect mode uses pte markers */
	if (!userfaultfd_wp(vma))
		return false;

	/* File-based uffd-wp always needs markers */
	if (!vma_is_anonymous(vma))
		return true;

	/*
	 * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED is
	 * enabled (to apply markers on zero pages).
	 */
	return userfaultfd_wp_unpopulated(vma);
}

static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in either
 * form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}

#endif /* _LINUX_USERFAULTFD_K_H */