// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/memcontrol.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define	FANOTIFY_INIT_ALL_EVENT_F_BITS				( \
		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
		__O_SYNC	| O_DSYNC	| O_CLOEXEC     | \
		O_LARGEFILE	| O_NOATIME	)
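
/*
 * Illustrative sketch (not part of the kernel code): a typical event_f_flags
 * value that passes the filter above, and one that does not. Only external
 * open flags are accepted; any bit outside FANOTIFY_INIT_ALL_EVENT_F_BITS
 * (for example O_CREAT) makes fanotify_init() return -EINVAL.
 *
 *	unsigned int ok_flags  = O_RDONLY | O_CLOEXEC | O_LARGEFILE;
 *	unsigned int bad_flags = O_RDONLY | O_CREAT;	/* rejected, EINVAL */
 */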

extern const struct fsnotify_ops fanotify_fsnotify_ops;

struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We held the notification_lock the whole time, so the queue cannot
	 * have changed since the emptiness check above.
	 */
	return fsnotify_remove_first_event(group);
}

static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can read
	 * even if the file was originally opened O_WRONLY.
	 */
	/*
	 * It is possible this event was an overflow event. In that case
	 * dentry and mnt are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file. This
		 * can happen, say, when tasks are gone and we try to open
		 * their /proc files, or when we try to open a WRONLY file
		 * like in sysfs. We just send the errno to userspace since
		 * there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FANOTIFY_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->pid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->notification_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid. If it is invalid we do nothing;
	 * either userspace can send a valid response later or the event will
	 * be cleaned up when the group is released.
	 */
	switch (response & ~FAN_AUDIT) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT))
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}

static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf, size_t count)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	/*
	 * Sanity check copy size in case get_one_event() and
	 * fill_event_metadata() event_len sizes ever get out of sync.
	 */
	if (WARN_ON_ONCE(fanotify_event_metadata.event_len > count))
		goto out_close_fd;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	if (fanotify_is_perm_event(event->mask))
		FANOTIFY_PE(event)->fd = fd;

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}
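
/*
 * Illustrative sketch (not kernel code): how the metadata records produced
 * above are typically consumed by a userspace listener. "fanotify_fd" is
 * assumed to be a descriptor returned by fanotify_init(); FAN_EVENT_OK and
 * FAN_EVENT_NEXT are the helpers from <linux/fanotify.h>.
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md;
 *
 *	for (md = (struct fanotify_event_metadata *)buf;
 *	     FAN_EVENT_OK(md, len);
 *	     md = FAN_EVENT_NEXT(md, len)) {
 *		if (md->vers != FANOTIFY_METADATA_VERSION)
 *			break;			// mismatched ABI, bail out
 *		if (md->fd >= 0)
 *			close(md->fd);		// always close the event fd
 *	}
 */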

/* fanotify userspace file descriptor functions */
static __poll_t fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
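
/*
 * Illustrative sketch (not kernel code): the EPOLLIN reported above lets a
 * listener multiplex the fanotify descriptor with other fds. "fanotify_fd"
 * is assumed to come from fanotify_init() with FAN_NONBLOCK set.
 *
 *	struct pollfd pfd = { .fd = fanotify_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// queue is non-empty, read() will not block
 *	}
 */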

static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;

			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf, count);
		if (unlikely(ret == -EOPENSTALE)) {
			/*
			 * We cannot report events with stale fd so drop it.
			 * Setting ret to 0 will continue the event loop and
			 * do the right thing if there are no more events to
			 * read (i.e. return bytes read, -EAGAIN or wait).
			 */
			ret = 0;
		}

		/*
		 * Permission events get queued to wait for response.  Other
		 * events can be destroyed now.
		 */
		if (!fanotify_is_perm_event(kevent->mask)) {
			fsnotify_destroy_event(group, kevent);
		} else {
			if (ret <= 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
			} else {
				spin_lock(&group->notification_lock);
				list_add_tail(&kevent->list,
					&group->fanotify_data.access_list);
				spin_unlock(&group->notification_lock);
			}
		}
		if (ret < 0)
			break;
		buf += ret;
		count -= ret;
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
		return -EINVAL;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
}
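
/*
 * Illustrative sketch (not kernel code): answering a permission event from
 * userspace. "md" is assumed to be a metadata record read from the fanotify
 * descriptor that carries FAN_OPEN_PERM or FAN_ACCESS_PERM.
 *
 *	struct fanotify_response resp = {
 *		.fd = md->fd,
 *		.response = FAN_ALLOW,	// or FAN_DENY, optionally | FAN_AUDIT
 *	};
 *
 *	if (write(fanotify_fd, &resp, sizeof(resp)) != sizeof(resp))
 *		perror("fanotify response");
 *	close(md->fd);
 */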

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct fanotify_perm_event_info *event, *next;
	struct fsnotify_event *fsn_event;

	/*
	 * Stop new events from arriving in the notification queue. Since
	 * userspace cannot use the fanotify fd anymore, no event can enter or
	 * leave access_list by now either.
	 */
	fsnotify_group_stop_queueing(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
	spin_lock(&group->notification_lock);
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
	while (!fsnotify_notify_queue_is_empty(group)) {
		fsn_event = fsnotify_remove_first_event(group);
		if (!(fsn_event->mask & FANOTIFY_PERM_EVENTS)) {
			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
			spin_lock(&group->notification_lock);
		} else {
			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
		}
	}
	spin_unlock(&group->notification_lock);

	/* Response for all permission events is set, wake up waiters */
	wake_up(&group->fanotify_data.access_waitq);

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
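
/*
 * Illustrative sketch (not kernel code): FIONREAD on a fanotify descriptor
 * reports how many bytes of metadata a read() could return right now.
 *
 *	int pending = 0;
 *
 *	if (ioctl(fanotify_fd, FIONREAD, &pending) == 0)
 *		printf("%d bytes (%d events) queued\n",
 *		       pending, pending / FAN_EVENT_METADATA_LEN);
 */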

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask = 0;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsn_mark->mask &= ~mask;
	} else {
		fsn_mark->ignored_mask &= ~mask;
	}
	*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
	spin_unlock(&fsn_mark->lock);

	return mask & oldmask;
}

static int fanotify_remove_mark(struct fsnotify_group *group,
				fsnotify_connp_t *connp, __u32 mask,
				unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(connp, group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (removed & fsnotify_conn_mask(fsn_mark->connector))
		fsnotify_recalc_mask(fsn_mark->connector);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	/* matches the fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	return fanotify_remove_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
				    mask, flags);
}

static int fanotify_remove_sb_mark(struct fsnotify_group *group,
				      struct super_block *sb, __u32 mask,
				      unsigned int flags)
{
	return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask, flags);
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	return fanotify_remove_mark(group, &inode->i_fsnotify_marks, mask,
				    flags);
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsn_mark->mask |= mask;
	} else {
		fsn_mark->ignored_mask |= mask;
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
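
/*
 * Worked example (an illustration, not a code path): the return value is the
 * set of bits newly added to the event mask, so the caller only recalculates
 * the connector mask when a genuinely new bit appears. For an ignored mask
 * update, oldmask stays at -1 and the function returns 0.
 *
 *	oldmask = FAN_OPEN;
 *	mask    = FAN_OPEN | FAN_CLOSE;
 *	added   = mask & ~oldmask;	// == FAN_CLOSE
 */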

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   fsnotify_connp_t *connp,
						   unsigned int type)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, group);
	ret = fsnotify_add_mark_locked(mark, connp, type, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}


static int fanotify_add_mark(struct fsnotify_group *group,
			     fsnotify_connp_t *connp, unsigned int type,
			     __u32 mask, unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(connp, group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, connp, type);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~fsnotify_conn_mask(fsn_mark->connector))
		fsnotify_recalc_mask(fsn_mark->connector);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	return fanotify_add_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
				 FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags);
}

static int fanotify_add_sb_mark(struct fsnotify_group *group,
				      struct super_block *sb, __u32 mask,
				      unsigned int flags)
{
	return fanotify_add_mark(group, &sb->s_fsnotify_marks,
				 FSNOTIFY_OBJ_TYPE_SB, mask, flags);
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    inode_is_open_for_write(inode))
		return 0;

	return fanotify_add_mark(group, &inode->i_fsnotify_marks,
				 FSNOTIFY_OBJ_TYPE_INODE, mask, flags);
}
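
/*
 * Illustrative sketch (not kernel code): how userspace typically combines an
 * ignored mark with a mount mark, e.g. watching a whole mount while
 * silencing one directory. The paths are made up for the example.
 *
 *	fanotify_mark(fanotify_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_MODIFY | FAN_CLOSE_WRITE, AT_FDCWD, "/srv");
 *	fanotify_mark(fanotify_fd,
 *		      FAN_MARK_ADD | FAN_MARK_IGNORED_MASK |
 *		      FAN_MARK_IGNORED_SURV_MODIFY,
 *		      FAN_MODIFY | FAN_CLOSE_WRITE, AT_FDCWD, "/srv/log");
 */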

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%x event_f_flags=%x\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

#ifdef CONFIG_AUDITSYSCALL
	if (flags & ~(FANOTIFY_INIT_FLAGS | FAN_ENABLE_AUDIT))
#else
	if (flags & ~FANOTIFY_INIT_FLAGS)
#endif
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	group->fanotify_data.flags = flags;
	atomic_inc(&user->fanotify_listeners);
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	switch (flags & FANOTIFY_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	if (flags & FAN_ENABLE_AUDIT) {
		fd = -EPERM;
		if (!capable(CAP_AUDIT_WRITE))
			goto out_destroy_group;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
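
/*
 * Illustrative sketch (not kernel code): a minimal caller of the syscall
 * above. CAP_SYS_ADMIN is required, so this is typically run as root.
 *
 *	int fanotify_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *					O_RDONLY | O_LARGEFILE);
 *	if (fanotify_fd < 0)
 *		err(1, "fanotify_init");
 */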

static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
			    int dfd, const char  __user *pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
	unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FANOTIFY_MARK_FLAGS)
		return -EINVAL;

	switch (mark_type) {
	case FAN_MARK_INODE:
	case FAN_MARK_MOUNT:
	case FAN_MARK_FILESYSTEM:
		break;
	default:
		return -EINVAL;
	}

	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FANOTIFY_MARK_TYPE_BITS | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
		valid_mask |= FANOTIFY_PERM_EVENTS;

	if (mask & ~valid_mask)
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permission events.
	 */
	ret = -EINVAL;
	if (mask & FANOTIFY_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (mark_type == FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else if (mark_type == FAN_MARK_FILESYSTEM)
			fsnotify_clear_sb_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (mark_type == FAN_MARK_INODE)
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update or remove the mark on the object selected by mark_type */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (mark_type == FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else if (mark_type == FAN_MARK_FILESYSTEM)
			ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (mark_type == FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else if (mark_type == FAN_MARK_FILESYSTEM)
			ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
}
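
/*
 * Illustrative sketch (not kernel code): requesting a permission event,
 * which the checks above only allow on FAN_CLASS_CONTENT or
 * FAN_CLASS_PRE_CONTENT groups. The path is made up for the example.
 *
 *	if (fanotify_mark(fanotify_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *			  FAN_OPEN_PERM, AT_FDCWD, "/home") < 0)
 *		err(1, "fanotify_mark");
 */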

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return do_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 7);
	BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);

	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
					 SLAB_PANIC|SLAB_ACCOUNT);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
	if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
		fanotify_perm_event_cachep =
			KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC);
	}

	return 0;
}
device_initcall(fanotify_user_setup);