/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  include/linux/userfaultfd_k.h
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
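/*
 * Illustrative sketch of the userspace side (not part of this header): the
 * very same O_* bits are what gets passed to the userfaultfd syscall, e.g.
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 */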

extern int sysctl_unprivileged_userfaultfd;

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/*
 * The mode of operation for __mcopy_atomic and its helpers.
 *
 * This is almost an implementation detail (mcopy_atomic below doesn't take this
 * as a parameter), but it's exposed here because memory-kind-specific
 * implementations (e.g. hugetlbfs) need to know the mode of operation.
 */
enum mcopy_atomic_mode {
	/* A normal copy_from_user into the destination range. */
	MCOPY_ATOMIC_NORMAL,
	/* Don't copy; map the destination range to the zero page. */
	MCOPY_ATOMIC_ZEROPAGE,
	/* Just install pte(s) with the existing page(s) in the page cache. */
	MCOPY_ATOMIC_CONTINUE,
};

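/*
 * Install pte(s) mapping @page at @dst_addr inside @dst_vma on behalf of the
 * mcopy/mfill helpers below.  @newly_allocated says whether the page was just
 * allocated rather than found in the page cache (as in the CONTINUE case),
 * and @wp_copy asks for the mapping to be installed write-protected.
 */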
extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, bool wp_copy);

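/*
 * Back-ends for the UFFDIO_COPY, UFFDIO_ZEROPAGE and UFFDIO_CONTINUE ioctls.
 * @mmap_changing lets them bail out with -EAGAIN while a concurrent
 * fork/mremap/remove is rearranging the destination mm.  An illustrative
 * userspace sketch for the copy case (not part of this header; on failure
 * copy.copy reports how many bytes were actually copied):
 *
 *	struct uffdio_copy copy = {
 *		.dst = (unsigned long)dst_addr,
 *		.src = (unsigned long)src_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		err(1, "UFFDIO_COPY");
 */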
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
			    unsigned long src_start, unsigned long len,
			    atomic_t *mmap_changing, __u64 mode);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
			      unsigned long dst_start,
			      unsigned long len,
			      atomic_t *mmap_changing);
extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
			      unsigned long len, atomic_t *mmap_changing);
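/*
 * Back-end for UFFDIO_WRITEPROTECT: turns the uffd-wp protection on
 * (@enable_wp == true) or off for the given range.  Userspace counterpart,
 * as an illustrative sketch only:
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = addr, .len = page_size },
 *		.mode  = UFFD_WP_MODE_WP,
 *	};
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 */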
extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on these kinds of uffd-registered vmas:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds. (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* setup the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

/*
 * Don't do fault around for either WP or MINOR registered uffd ranges.  For
 * a MINOR registered range, fault around would be a total disaster: ptes can
 * be installed without notifications.  For WP it should mostly be fine as
 * long as fault around checks for pte_none() before installing the pte, but
 * to be extra safe we forbid it as well.
 */
static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags)
{
	if (vm_flags & VM_UFFD_MINOR)
		return is_vm_hugetlb_page(vma) || vma_is_shmem(vma);

#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * If the user requested uffd-wp but pte markers for uffd-wp are
	 * not enabled, then only anonymous memory is supported; shmem
	 * and hugetlbfs are not.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	    vma_is_shmem(vma);
}

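/*
 * Hooks used by the core mm to keep uffd contexts coherent and to deliver the
 * corresponding events: dup_*() from fork(), mremap_*() from mremap(), and
 * the remove/unmap pairs from the madvise/munmap style paths that drop pages
 * or whole ranges.
 */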
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
				unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_USERFAULTFD */

static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in either
 * form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}
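/*
 * Sketch of the intended caller pattern (an assumption, not a specific call
 * site): present ptes are checked with pte_uffd_wp()/userfaultfd_pte_wp(),
 * while non-present ones go through pte_swp_uffd_wp_any(), e.g.
 *
 *	if (pte_present(pte) ? pte_uffd_wp(pte) : pte_swp_uffd_wp_any(pte))
 *		the uffd-wp bit must be honoured or carried over
 */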

#endif /* _LINUX_USERFAULTFD_K_H */