// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>

#include "fanotify.h"

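/*
 * Two queued events may be coalesced only if they describe the same
 * object: same inode, same path (mount + dentry) and same originating
 * thread group.
 */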
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event_info *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
	    old->path.mnt == new->path.mnt &&
	    old->path.dentry == new->path.dentry)
		return true;
	return false;
}

/*
 * Try to coalesce the new event with one already on the list by OR-ing
 * the event masks. The list must be locked by the caller (the fsnotify
 * core holds group->notification_lock here). Returns 1 if the event was
 * merged, 0 if it should be queued as a new entry.
 */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(event->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			test_event->mask |= event->mask;
			return 1;
		}
	}

	return 0;
}

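/*
 * Sleep until userspace answers a permission event, then convert the
 * answer into an error code for the caller. Userspace typically replies
 * by writing a struct fanotify_response to the fanotify fd; roughly
 * (illustrative userspace-side sketch, not part of this file):
 *
 *	struct fanotify_response resp = {
 *		.fd = metadata->fd,
 *		.response = FAN_ALLOW,
 *	};
 *	write(fanotify_fd, &resp, sizeof(resp));
 *
 * Anything other than an explicit FAN_ALLOW results in -EPERM.
 */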
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event_info *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response);

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	event->response = 0;

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}

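/*
 * Combine the masks of all marks that matched this event (inode and
 * mount) to decide whether the group wants it. Events that carry no
 * path data, or that refer to objects other than regular files and
 * directories, are filtered out up front.
 */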
static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
				       u32 event_mask, const void *data,
				       int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	const struct path *path = data;
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!d_is_reg(path->dentry) &&
	    !d_can_lookup(path->dentry))
		return false;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * if the event is for a child and this inode doesn't care about
		 * events on the child, don't send it!
		 */
		if (type == FSNOTIFY_OBJ_TYPE_INODE &&
		    (event_mask & FS_EVENT_ON_CHILD) &&
		    !(mark->mask & FS_EVENT_ON_CHILD))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	if (d_is_dir(path->dentry) &&
	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
		return false;

	if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
				 ~marks_ignored_mask)
		return true;

	return false;
}

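/*
 * Allocate and initialize an event. Permission events use the larger
 * fanotify_perm_event_info so the userspace response can be stored with
 * the event. The event takes its own references on the path and tgid.
 */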
struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
						 struct inode *inode, u32 mask,
						 const struct path *path)
{
	struct fanotify_event_info *event;
	gfp_t gfp = GFP_KERNEL;

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			return NULL;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
	if (!event)
		return NULL;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
	return event;
}

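/*
 * Main fsnotify callback: filter the event, allocate it, queue it on the
 * group's notification list (merging with an existing entry when
 * possible) and, for permission events, block until userspace delivers
 * a verdict.
 */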
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const unsigned char *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event_info *event;
	struct fsnotify_event *fsn_event;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

	if (!fanotify_should_send_event(iter_info, mask, data, data_type))
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion.  Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
					    iter_info);
		fsnotify_destroy_event(group, fsn_event);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

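/* Undo the per-user listener accounting taken when the group was set up. */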
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

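/*
 * Drop the references the event holds (path, tgid) and return it to the
 * cache it came from.
 */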
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event_info *event;

	event = FANOTIFY_E(fsn_event);
	path_put(&event->path);
	put_pid(event->tgid);
	if (fanotify_is_perm_event(fsn_event->mask)) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
	kmem_cache_free(fanotify_event_cachep, event);
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

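/*
 * Callback table handed to the fsnotify backend when a fanotify group is
 * allocated (fanotify_init() passes it to fsnotify_alloc_group(), see
 * fanotify_user.c); the backend invokes these hooks for event delivery
 * and teardown.
 */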
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};