xref: /openbmc/linux/include/linux/userfaultfd_k.h (revision 83946783)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  include/linux/userfaultfd_k.h
4  *
5  *  Copyright (C) 2015  Red Hat, Inc.
6  *
7  */
8 
9 #ifndef _LINUX_USERFAULTFD_K_H
10 #define _LINUX_USERFAULTFD_K_H
11 
12 #ifdef CONFIG_USERFAULTFD
13 
14 #include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
15 
16 #include <linux/fcntl.h>
17 #include <linux/mm.h>
18 #include <asm-generic/pgtable_uffd.h>
19 
/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
/*
 * Use our own UFFD_SHARED_FCNTL_FLAGS rather than eventfd's
 * EFD_SHARED_FCNTL_FLAGS: the two expand to the same value
 * (O_CLOEXEC | O_NONBLOCK), but relying on the eventfd macro made this
 * header silently depend on linux/eventfd.h being included first.
 */
#define UFFD_FLAGS_SET (UFFD_SHARED_FCNTL_FLAGS)
35 
/*
 * Sysctl knob gating userfaultfd(2) for unprivileged users.
 * NOTE(review): exact semantics inferred from the name — the definition
 * lives outside this header; confirm against fs/userfaultfd.c.
 */
extern int sysctl_unprivileged_userfaultfd;

/*
 * Fault handler invoked for a fault on a userfaultfd-armed VMA; @reason
 * is presumably one of the VM_UFFD_* reason flags (cf. __VM_UFFD_FLAGS).
 * The !CONFIG_USERFAULTFD stub below simply returns VM_FAULT_SIGBUS.
 */
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
39 
40 /*
41  * The mode of operation for __mcopy_atomic and its helpers.
42  *
43  * This is almost an implementation detail (mcopy_atomic below doesn't take this
44  * as a parameter), but it's exposed here because memory-kind-specific
45  * implementations (e.g. hugetlbfs) need to know the mode of operation.
46  */
47 enum mcopy_atomic_mode {
48 	/* A normal copy_from_user into the destination range. */
49 	MCOPY_ATOMIC_NORMAL,
50 	/* Don't copy; map the destination range to the zero page. */
51 	MCOPY_ATOMIC_ZEROPAGE,
52 	/* Just install pte(s) with the existing page(s) in the page cache. */
53 	MCOPY_ATOMIC_CONTINUE,
54 };
55 
/*
 * Install pte(s) for @page at @dst_addr in @dst_vma.  @newly_allocated and
 * @wp_copy tune accounting and write-protection of the installed entry.
 * NOTE(review): parameter semantics inferred from names — implementation is
 * not in this header; verify at the definition.
 */
extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, bool wp_copy);

/* Copy [src_start, src_start+len) into dst_mm (cf. MCOPY_ATOMIC_NORMAL). */
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
			    unsigned long src_start, unsigned long len,
			    atomic_t *mmap_changing, __u64 mode);
/* Zero-page-fill the destination range (cf. MCOPY_ATOMIC_ZEROPAGE). */
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
			      unsigned long dst_start,
			      unsigned long len,
			      atomic_t *mmap_changing);
/* Install existing page cache pages (cf. MCOPY_ATOMIC_CONTINUE). */
extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
			      unsigned long len, atomic_t *mmap_changing);
/* Enable/disable uffd write protection over [start, start+len). */
extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);
74 /* mm helpers */
75 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
76 					struct vm_userfaultfd_ctx vm_ctx)
77 {
78 	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
79 }
80 
81 /*
82  * Never enable huge pmd sharing on some uffd registered vmas:
83  *
84  * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
85  *
86  * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
87  *   VMAs which share huge pmds. (If you have two mappings to the same
88  *   underlying pages, and fault in the non-UFFD-registered one with a write,
89  *   with huge pmd sharing this would *also* setup the second UFFD-registered
90  *   mapping, and we'd not get minor faults.)
91  */
92 static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
93 {
94 	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
95 }
96 
97 static inline bool userfaultfd_missing(struct vm_area_struct *vma)
98 {
99 	return vma->vm_flags & VM_UFFD_MISSING;
100 }
101 
102 static inline bool userfaultfd_wp(struct vm_area_struct *vma)
103 {
104 	return vma->vm_flags & VM_UFFD_WP;
105 }
106 
107 static inline bool userfaultfd_minor(struct vm_area_struct *vma)
108 {
109 	return vma->vm_flags & VM_UFFD_MINOR;
110 }
111 
112 static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
113 				      pte_t pte)
114 {
115 	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
116 }
117 
118 static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
119 					   pmd_t pmd)
120 {
121 	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
122 }
123 
124 static inline bool userfaultfd_armed(struct vm_area_struct *vma)
125 {
126 	return vma->vm_flags & __VM_UFFD_FLAGS;
127 }
128 
/*
 * fork() hooks: collect per-VMA uffd state for the child, then complete
 * the hand-off once the new mm is set up.  NOTE(review): two-phase split
 * inferred from naming — confirm at the definitions.
 */
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

/* mremap() hooks: preserve the uffd context across a VMA move. */
extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

/* madvise()/hole-punch hook for [start, end) on a registered VMA. */
extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

/* munmap() hooks: stage events on @uf, then deliver them after unmap. */
extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);
147 
148 #else /* CONFIG_USERFAULTFD */
149 
150 /* mm helpers */
151 static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
152 				unsigned long reason)
153 {
154 	return VM_FAULT_SIGBUS;
155 }
156 
157 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
158 					struct vm_userfaultfd_ctx vm_ctx)
159 {
160 	return true;
161 }
162 
163 static inline bool userfaultfd_missing(struct vm_area_struct *vma)
164 {
165 	return false;
166 }
167 
168 static inline bool userfaultfd_wp(struct vm_area_struct *vma)
169 {
170 	return false;
171 }
172 
173 static inline bool userfaultfd_minor(struct vm_area_struct *vma)
174 {
175 	return false;
176 }
177 
178 static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
179 				      pte_t pte)
180 {
181 	return false;
182 }
183 
184 static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
185 					   pmd_t pmd)
186 {
187 	return false;
188 }
189 
190 
191 static inline bool userfaultfd_armed(struct vm_area_struct *vma)
192 {
193 	return false;
194 }
195 
static inline int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *l)
{
	return 0;	/* nothing to duplicate on fork */
}
201 
static inline void dup_userfaultfd_complete(struct list_head *l)
{
	/* nop */
}
205 
static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
	/* nop */
}
210 
static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
	/* nop */
}
217 
218 static inline bool userfaultfd_remove(struct vm_area_struct *vma,
219 				      unsigned long start,
220 				      unsigned long end)
221 {
222 	return true;
223 }
224 
static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;	/* no unmap events to stage */
}
231 
static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
	/* nop */
}
236 
237 #endif /* CONFIG_USERFAULTFD */
238 
239 #endif /* _LINUX_USERFAULTFD_K_H */
240