#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

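/*
 * A fanotify_response_event ties an outstanding permission event to the
 * file descriptor that was handed to the listener, so that a later
 * write() of a struct fanotify_response can be matched back to the event
 * it answers.
 */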
struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}

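/*
 * Open the path carried by @event with the f_flags the listener passed to
 * fanotify_init() (plus FMODE_NONOTIFY, so the open itself does not
 * generate further events) and reserve an unused fd for it.  The new
 * struct file is handed back through @file; the caller installs it into
 * the fd table only after the event metadata has been copied out.
 */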
static int create_fd(struct fsnotify_group *group,
			struct fsnotify_event *event,
			struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * We need a new file handle for the userspace program so it can read
	 * even if the file was originally opened O_WRONLY.
	 *
	 * It's possible this event was an overflow event; in that case dentry
	 * and mnt are NULL.  That's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen, for example, when a task is gone and we
		 * try to open its /proc files, or when we try to open a
		 * WRONLY file such as one in sysfs.  We just send the errno
		 * to userspace since there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

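/*
 * Fill in the fixed-size struct fanotify_event_metadata that will be
 * copied to the listener: format version, the outgoing event mask, the
 * tgid of the task that triggered the event (translated into the
 * reader's pid namespace) and an open fd for the object, or FAN_NOFD
 * for queue-overflow events.
 */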
static int fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event,
				   struct file **file)
{
	int ret = 0;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	*file = NULL;
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid.  If it is invalid we do nothing:
	 * userspace can still send a valid response later, or the entry will
	 * be cleaned up when the fanotify fd is closed.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

#endif

static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		event->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

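/*
 * Events are consumed with an ordinary read() on the fanotify fd and the
 * buffer is walked with the FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from
 * <linux/fanotify.h>.  An illustrative userspace sketch (not part of this
 * file; "fanotify_fd" is a placeholder and error handling is omitted):
 *
 *	char buf[4096];
 *	struct fanotify_event_metadata *ev;
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *
 *	for (ev = (void *)buf; FAN_EVENT_OK(ev, len);
 *	     ev = FAN_EVENT_NEXT(ev, len)) {
 *		if (ev->mask & FAN_Q_OVERFLOW)
 *			continue;		(ev->fd is FAN_NOFD here)
 *		... inspect ev->mask, ev->pid, /proc/self/fd/<ev->fd> ...
 *		close(ev->fd);
 *	}
 */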
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

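/*
 * fanotify_write() consumes struct fanotify_response records written by a
 * listener created with FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT.  For
 * reference, a minimal userspace sketch of answering a permission event
 * (illustrative only; "fanotify_fd" is a placeholder and error handling
 * is omitted):
 *
 *	struct fanotify_event_metadata ev;
 *	struct fanotify_response resp;
 *
 *	read(fanotify_fd, &ev, sizeof(ev));
 *	resp.fd = ev.fd;
 *	resp.response = FAN_ALLOW;	(or FAN_DENY)
 *	write(fanotify_fd, &resp, sizeof(resp));
 *	close(ev.fd);
 */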
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

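/*
 * Resolve the dfd/filename pair handed to fanotify_mark() into a struct
 * path, honouring FAN_MARK_DONT_FOLLOW and FAN_MARK_ONLYDIR, and verify
 * that the caller has read permission on the resulting inode.
 */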
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

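/*
 * Clear @mask from either the mark's event mask or its ignored mask,
 * depending on FAN_MARK_IGNORED_MASK.  Returns the subset of @mask that
 * was actually set (and thus removed); *destroy is set when the modified
 * mask is left empty, telling the caller to tear the mark down.
 */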
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

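/*
 * OR @mask into either the mark's event mask or its ignored mask.  When
 * FAN_MARK_ONDIR is not given, FAN_ONDIR is also added to the ignored
 * mask so events on directories are suppressed.  Returns the event-mask
 * bits that were newly set (0 for ignored-mask updates), which the
 * callers use to decide whether the object's aggregate mask must be
 * recalculated.
 */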
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

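/*
 * Allocate a new mark for @inode or @mnt and attach it to the group,
 * enforcing the per-group limit on the number of marks.  Called with the
 * group's mark_mutex held; returns an ERR_PTR on failure.
 */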
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
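
/*
 * Typical userspace usage, for reference (an illustrative sketch only;
 * the flags, mask and path below are examples, not requirements, and
 * error handling is omitted):
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF,
 *			       O_RDONLY | O_LARGEFILE);
 *
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/home");
 *
 * followed by read()s on fd as sketched above fanotify_read().
 */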

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);