/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  include/linux/userfaultfd_k.h
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
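
/*
 * Example (userspace sketch, not part of this header): because UFFD_CLOEXEC
 * and UFFD_NONBLOCK alias O_CLOEXEC and O_NONBLOCK, a caller can pass the
 * familiar O_* values directly to the userfaultfd(2) syscall:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	if (uffd < 0)
 *		err(1, "userfaultfd");
 */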

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;

/* Mutually exclusive modes of operation. */
enum mfill_atomic_mode {
	MFILL_ATOMIC_COPY,
	MFILL_ATOMIC_ZEROPAGE,
	MFILL_ATOMIC_CONTINUE,
	MFILL_ATOMIC_POISON,
	NR_MFILL_ATOMIC_MODES,
};

#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))

static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
{
	return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
}

static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
{
	flags &= ~MFILL_ATOMIC_MODE_MASK;
	return flags | ((__force uffd_flags_t) mode);
}

/* Flags controlling behavior. These behavior changes are mode-independent. */
#define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)
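
/*
 * Layout sketch (derived from the macros above, not a separate definition):
 * with four modes, MFILL_ATOMIC_MODE_BITS is 2, so the mode lives in bits
 * [1:0] and behavior flags start at bit 2. A caller composes a value like:
 *
 *	uffd_flags_t flags = uffd_flags_set_mode(0, MFILL_ATOMIC_COPY);
 *
 *	if (wp_requested)			// hypothetical local variable
 *		flags |= MFILL_ATOMIC_WP;	// == BIT(2) with the layout above
 *
 *	uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY);	// true
 */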

extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, uffd_flags_t flags);

extern ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
				 unsigned long src_start, unsigned long len,
				 atomic_t *mmap_changing, uffd_flags_t flags);
extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
				     unsigned long dst_start,
				     unsigned long len,
				     atomic_t *mmap_changing);
extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
				     unsigned long len, atomic_t *mmap_changing,
				     uffd_flags_t flags);
extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
				   unsigned long len, atomic_t *mmap_changing,
				   uffd_flags_t flags);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);
extern long uffd_wp_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long len, bool enable_wp);
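
/*
 * Rough correspondence with the UFFDIO_* ioctls (an assumption based on the
 * ioctl and function names; the actual call sites live in fs/userfaultfd.c,
 * not in this header):
 *
 *	UFFDIO_COPY		-> mfill_atomic_copy()
 *	UFFDIO_ZEROPAGE		-> mfill_atomic_zeropage()
 *	UFFDIO_CONTINUE		-> mfill_atomic_continue()
 *	UFFDIO_POISON		-> mfill_atomic_poison()
 *	UFFDIO_WRITEPROTECT	-> mwriteprotect_range()
 */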

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on some uffd registered vmas:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds. (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* setup the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}
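
/*
 * Illustration of the VM_UFFD_MINOR case above (userspace-level sketch under
 * assumed setup; "fd" is a hugetlbfs file large and aligned enough that pmd
 * sharing would otherwise apply):
 *
 *	void *a = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	void *b = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	// 'a' is registered with UFFDIO_REGISTER_MODE_MINOR, then:
 *	((char *)b)[0] = 1;	// populates the page tables through 'b'
 *
 * If 'a' and 'b' shared huge pmds, the write through 'b' would also install
 * the entries seen by 'a', and the expected minor fault on 'a' would never
 * be delivered; hence pmd sharing is disabled for such VMAs.
 */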

/*
 * Don't do fault-around for either WP- or MINOR-registered uffd ranges.  For
 * a MINOR-registered range, fault-around would be a disaster: ptes could be
 * installed without any notification.  For WP it should mostly be fine as
 * long as fault-around checks pte_none() before installing, but to be safe
 * we forbid it as well.
 */
static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags)
{
	if ((vm_flags & VM_UFFD_MINOR) &&
	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
		return false;
#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * If the user requested uffd-wp but pte markers are not enabled
	 * for uffd-wp, then only anonymous VMAs are supported, not shmem
	 * or hugetlbfs.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	    vma_is_shmem(vma);
}

extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
				unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_USERFAULTFD */

static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
{
	/* Only wr-protect mode uses pte markers */
	if (!userfaultfd_wp(vma))
		return false;

	/* File-based uffd-wp always needs markers */
	if (!vma_is_anonymous(vma))
		return true;

	/*
	 * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED
	 * is enabled (to apply markers on zero pages).
	 */
	return userfaultfd_wp_unpopulated(vma);
}

static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in either
 * form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}
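
/*
 * Usage sketch (hypothetical caller, for illustration only): code that must
 * preserve uffd-wp state across a non-present entry can check it regardless
 * of which representation is in use:
 *
 *	if (!pte_present(pte) && pte_swp_uffd_wp_any(pte))
 *		uffd_wp = true;		// carry the wr-protect state forward
 */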

#endif /* _LINUX_USERFAULTFD_K_H */