#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define	FANOTIFY_INIT_ALL_EVENT_F_BITS				( \
		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
		__O_SYNC	| O_DSYNC	| O_CLOEXEC	| \
		O_LARGEFILE	| O_NOATIME	)

extern const struct fsnotify_ops fanotify_fsnotify_ops;

struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We have held the notification_lock the whole time, so this is
	 * the same event we saw on the queue above.
	 */
	return fsnotify_remove_first_event(group);
}

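/*
 * Allocate an unused fd in the listener's file table and open the path
 * recorded in the event so userspace can access the object the event
 * refers to. Returns the fd (with *file pointing to the new struct file,
 * which the caller installs) or a negative error.
 */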
static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can
	 * read even if the file was originally opened O_WRONLY. If this
	 * was an overflow event, dentry and mnt are NULL; that's fine,
	 * just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen, say, when a task is gone and we try to
		 * open its /proc files, or when we try to open a WRONLY
		 * file like in sysfs. We just send the errno to userspace
		 * since there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

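/*
 * Fill the fixed-size fanotify_event_metadata record for one queued event:
 * metadata version, event mask, pid of the originating task and an open fd
 * for the object (FAN_NOFD for queue overflow events).
 */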
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
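/*
 * Find the permission event that was reported to userspace with the given
 * fd, remove it from the group's access_list and return it, or NULL if no
 * such event is pending.
 */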
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->notification_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

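/*
 * Handle one fanotify_response written by userspace: validate it, look up
 * the matching permission event by fd and wake up the task waiting for the
 * access decision.
 */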
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid. If it is invalid we do nothing;
	 * either userspace can send a valid response or we will clean the
	 * event up after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif

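/*
 * Copy a single event to the userspace buffer. The fd opened for the event
 * is only installed into the fd table once the copy has succeeded; on
 * success the length of the copied metadata is returned.
 */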
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

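/*
 * Copy as many whole events as fit into the userspace buffer. Unless
 * O_NONBLOCK is set, block until at least one event has been read.
 * Permission events are moved to the access_list to wait for a userspace
 * response instead of being destroyed.
 */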
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;

			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		if (unlikely(ret == -EOPENSTALE)) {
			/*
			 * We cannot report events with stale fd so drop it.
			 * Setting ret to 0 will continue the event loop and
			 * do the right thing if there are no more events to
			 * read (i.e. return bytes read, -EAGAIN or wait).
			 */
			ret = 0;
		}

		/*
		 * Permission events get queued to wait for response.  Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret <= 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
			} else {
				spin_lock(&group->notification_lock);
				list_add_tail(&kevent->list,
					&group->fanotify_data.access_list);
				spin_unlock(&group->notification_lock);
			}
#endif
		}
		if (ret < 0)
			break;
		buf += ret;
		count -= ret;
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

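/*
 * Writing to the fanotify fd is only meaningful for permission events:
 * userspace writes a struct fanotify_response carrying the fd it was given
 * in the event and either FAN_ALLOW or FAN_DENY.
 */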
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

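/*
 * Called when the last reference to the fanotify fd is dropped. Any
 * permission events still pending get an implicit FAN_ALLOW response so
 * that tasks blocked waiting for an access decision are released.
 */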
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;
	struct fsnotify_event *fsn_event;

	/*
	 * Stop new events from arriving in the notification queue. Since
	 * userspace cannot use the fanotify fd anymore, no event can enter
	 * or leave the access_list by now either.
	 */
	fsnotify_group_stop_queueing(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
	spin_lock(&group->notification_lock);
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
	while (!fsnotify_notify_queue_is_empty(group)) {
		fsn_event = fsnotify_remove_first_event(group);
		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
			spin_lock(&group->notification_lock);
		} else
			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
	}
	spin_unlock(&group->notification_lock);

	/* The response for all permission events is set, wake up waiters */
	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

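/*
 * Only FIONREAD is supported: report how many bytes of event metadata are
 * currently queued and could be read without blocking.
 */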
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

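/*
 * Resolve the dfd/pathname pair passed to fanotify_mark() into a struct
 * path, honouring FAN_MARK_DONT_FOLLOW and FAN_MARK_ONLYDIR, and verify
 * that the caller has read permission on the resulting inode.
 */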
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

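/*
 * Clear bits in the mark's event mask (or in its ignored mask if
 * FAN_MARK_IGNORED_MASK is set). Returns the bits that were actually
 * removed and sets *destroy if the mark carries no bits anymore and can
 * be freed.
 */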
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask = 0;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask & ~mask;

		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsn_mark->mask = tmask;
	} else {
		__u32 tmask = fsn_mark->ignored_mask & ~mask;
		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;
		fsn_mark->ignored_mask = tmask;
	}
	*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
	spin_unlock(&fsn_mark->lock);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
				      group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_mask(inode->i_fsnotify_marks);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	/* matches the fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return 0;
}

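/*
 * Set bits in the mark's event mask (or in its ignored mask if
 * FAN_MARK_IGNORED_MASK is set) and return the bits that were newly added.
 */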
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask | mask;

		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsn_mark->mask = tmask;
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		fsn_mark->ignored_mask = tmask;
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

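/*
 * Allocate a new mark for this group and attach it to the given inode or
 * vfsmount, enforcing the per-group limit on the number of marks. Called
 * with the group's mark_mutex held.
 */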
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, group);
	ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
				      group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_mask(inode->i_fsnotify_marks);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
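/*
 * Create a new fanotify group and return a file descriptor for it. A
 * typical userspace call would be something like
 * fanotify_init(FAN_CLASS_NOTIF | FAN_CLOEXEC, O_RDONLY | O_LARGEFILE);
 * events are then consumed through read()/poll() on the returned fd via
 * fanotify_fops above.
 */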
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}

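/*
 * Add, remove or flush marks on an inode or mount for an existing fanotify
 * group, e.g. fanotify_mark(fd, FAN_MARK_ADD, FAN_ALL_EVENTS, AT_FDCWD,
 * path) from userspace. Only the lower 32 bits of the mask are used.
 */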
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permission events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

#ifdef CONFIG_COMPAT
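/*
 * 32-bit tasks pass the 64-bit mask as two 32-bit arguments; reassemble
 * them in the right order before calling the native syscall.
 */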
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);