/* fs/notify/fanotify/fanotify_user.c (revision ce932d0c5589e9766e089c22c66890dfc48fbd94) */
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>

#include "../../mount.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

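/*
 * A fanotify_response_event tracks a permission event that has been handed
 * to userspace (identified by the fd we gave out with it) until userspace
 * writes a FAN_ALLOW/FAN_DENY response back or the group is torn down.
 */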
struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* we have held the notification_mutex the whole time, so this is
	 * the same event we checked for above */
	return fsnotify_remove_notify_event(group);
}

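/*
 * Open a new struct file for the path carried by @event and install it in
 * an unused fd in the listener's file table.  This new file is what the
 * fanotify listener reads from, independent of how the watched task opened
 * the object.
 */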
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can
	 * read even if the file was originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case
	 * dentry and mnt are NULL; that's fine, just don't call dentry_open */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.
		 * this can happen, say, when a task is gone and we try to
		 * open its /proc files, or when we try to open a WRONLY
		 * file like in sysfs.  we just send the errno to userspace
		 * since there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}

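/*
 * Translate an in-kernel fsnotify_event into the fixed-size
 * fanotify_event_metadata record that gets copied to userspace.  Overflow
 * events carry no file, so they report FAN_NOFD instead of an open fd.
 */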
static int fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event)
{
	int ret = 0;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

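/*
 * Userspace answers a permission event by writing a struct
 * fanotify_response back to the fanotify fd.  An illustrative sketch of
 * the userspace side (not part of this file):
 *
 *	struct fanotify_response resp;
 *
 *	resp.fd = metadata->fd;		(fd from the event just read)
 *	resp.response = FAN_ALLOW;	(or FAN_DENY)
 *	write(fanotify_fd, &resp, sizeof(resp));
 */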
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid.  if it is invalid we do nothing;
	 * either userspace can send a valid response or we will clean it up
	 * after the timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

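/*
 * Queue a fanotify_response_event for a permission event before its fd is
 * handed to userspace.  If the group is already bypassing permission checks
 * (the listener is going away), answer FAN_ALLOW immediately instead.
 */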
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;

	re = dequeue_re(group, fd);
	if (!re)
		return;

	BUG_ON(re->event != event);

	kmem_cache_free(fanotify_response_event_cache, re);

	return;
}
#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	return;
}
#endif

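/*
 * Copy one event to userspace: fill in the metadata (opening an fd for the
 * event's file), queue the access-response bookkeeping for permission
 * events, then copy the record out.  Any failure after the fd was created
 * unwinds those steps, and a failed permission event is answered FAN_DENY
 * so the waiting task is not left blocked.
 */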
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_kill_access_response;

	return fanotify_event_metadata.event_len;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	if (fd != FAN_NOFD)
		sys_close(fd);
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		event->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

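/*
 * Read as many whole events as fit in the caller's buffer.  Once at least
 * one event has been copied we return the bytes written so far rather than
 * sleep; an empty queue blocks unless O_NONBLOCK was set on the fd.
 */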
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

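/*
 * On the final close of the fanotify fd, mark the group as bypassing
 * permission checks and answer FAN_ALLOW to everything still queued, so no
 * task stays blocked on a listener that has gone away.
 */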
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

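/*
 * Each mark carries two bitmaps: mask, the events this group wants to see,
 * and ignored_mask, the events it wants suppressed.  The helpers below
 * update one of the two, selected by FAN_MARK_IGNORED_MASK, and report
 * which bits actually changed so the callers know whether the object's
 * aggregate mask needs recalculating.
 */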
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}

/* fanotify syscalls */
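/*
 * A minimal sketch of how userspace drives the two syscalls below,
 * illustrative only (it assumes the libc fanotify_init()/fanotify_mark()
 * wrappers and CAP_SYS_ADMIN; it is not part of this file):
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/");
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *	while (FAN_EVENT_OK(md, len)) {
 *		(handle md->mask and md->pid here)
 *		if (md->fd != FAN_NOFD)
 *			close(md->fd);
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 */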
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_put_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}

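/*
 * fanotify_mark() adds, removes, or flushes marks.  FAN_MARK_MOUNT selects
 * a mark on the vfsmount containing the path; otherwise the mark goes on
 * the path's inode.
 */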
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char  __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* for now we only use the lower 32 bits of the mask. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;
	group = filp->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to request permission events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* add, remove, or flush the requested mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char  __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot
 * return an error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);
894