// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
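
/*
 * Illustrative only: these knobs show up under /proc/sys/fs/inotify/ and
 * can be inspected or tuned like any other sysctl (the values below are
 * examples, not guaranteed defaults for every system):
 *
 *	# cat /proc/sys/fs/inotify/max_user_watches
 *	8192
 *	# sysctl fs.inotify.max_user_watches=65536
 */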

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Every mark accepts its own IN_IGNORED, cares about its
	 * children, and should receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/*
	 * Keep only the event bits requested in arg; the watch-control
	 * flags (IN_ONLYDIR, IN_DONT_FOLLOW, IN_MASK_ADD, IN_MASK_CREATE)
	 * are consumed by the callers.
	 */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
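
/*
 * A worked example of the two translations (the IN_ and FS_ constants are
 * bit-identical, as checked in inotify_user_setup() below): a userspace
 * arg of IN_CREATE | IN_ONESHOT becomes FS_CREATE | FS_IN_ONESHOT plus
 * the implicit FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT on the
 * kernel side; on the way back out, inotify_mask_to_arg() drops the
 * kernel-internal bits so userspace sees only IN_CREATE (plus IN_ISDIR
 * and friends when applicable).
 */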

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
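
/*
 * A worked example of the rounding, assuming the usual 16-byte
 * sizeof(struct inotify_event): a 5-byte name ("hello") needs
 * 5 + 1 = 6 bytes including the terminating '\0', and
 * roundup(6, 16) = 16, so 16 bytes of name plus padding follow the
 * fixed-size event header.
 */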

/*
 * Get an inotify_kernel_event if one exists and is small enough to
 * fit in "count". Return an error pointer if the buffer is too small
 * to hold the first queued event.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We held the notification_lock the whole time, so this is the
	 * same event we peeked above.
	 */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * Round up the name length, including an extra byte for the
	 * terminating '\0', to a multiple of event_size.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the file name, so here we have to send the
	 * name and then pad it out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the file name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
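
/*
 * For illustration, the layout produced in the user buffer for a single
 * event carrying the 5-byte name "hello" (assuming the usual 16-byte
 * struct inotify_event) is:
 *
 *	offset  0..15	struct inotify_event { wd, mask, cookie, len = 16 }
 *	offset 16..21	"hello\0"
 *	offset 22..31	zero padding up to the next 16-byte boundary
 *
 * Userspace walks the buffer by advancing
 * sizeof(struct inotify_event) + event->len per record.
 */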

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
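
/*
 * A minimal userspace consumer sketch (illustrative only, not part of
 * this file) matching the record layout produced above:
 *
 *	char buf[4096];
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		handle(ev->wd, ev->mask, ev->len ? ev->name : NULL);
 *		p += sizeof(struct inotify_event) + ev->len;
 *	}
 *
 * Here inotify_fd and handle() are assumed names for the example.
 */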

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_new_group->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}
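
/*
 * FIONREAD lets a consumer size its read buffer exactly; an illustrative
 * userspace sketch:
 *
 *	int bytes;
 *	if (ioctl(inotify_fd, FIONREAD, &bytes) == 0 && bytes > 0)
 *		... issue a read() with a buffer of "bytes" bytes ...
 *
 * INOTIFY_IOC_SETNEXTWD is used by checkpoint/restore tooling (e.g. CRIU)
 * to restore watch descriptor numbering by moving the idr cursor.
 */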

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
						unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permission on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	error = security_path_notify(path, mask,
				FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
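
/*
 * Note: idr_alloc_cyclic() hands out watch descriptors starting at 1 and
 * continues from the last allocation point rather than reusing the lowest
 * free wd, so a wd freed by inotify_rm_watch() is not immediately
 * recycled. For example, after allocating wds 1, 2, 3 and removing 2, the
 * next watch typically gets wd 4, not 2.
 */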

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
							 int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr?  we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  Something went
	 * badly wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad refcounting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
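
/*
 * Semantics of repeated inotify_add_watch() calls on the same inode, for
 * illustration: with an existing watch whose mask is IN_CREATE,
 *
 *	inotify_add_watch(fd, path, IN_DELETE)                   mask = IN_DELETE
 *	inotify_add_watch(fd, path, IN_DELETE | IN_MASK_ADD)     mask = IN_CREATE | IN_DELETE
 *	inotify_add_watch(fd, path, IN_DELETE | IN_MASK_CREATE)  returns -EEXIST
 *
 * i.e. the mask is replaced unless IN_MASK_ADD is given, and
 * IN_MASK_CREATE refuses to touch an existing watch.
 */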

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, 0);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * fsnotify_alloc_group() took a reference to the group; we drop it
	 * when the file is released.
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}
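
/*
 * Illustrative userspace usage of the two entry points (glibc wrappers
 * assumed):
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *	if (fd < 0)
 *		perror("inotify_init1");
 *
 * inotify_init() is equivalent to inotify_init1(0).
 */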

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
			(mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
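
/*
 * Illustrative userspace usage (glibc wrapper assumed); the returned wd
 * identifies the watch in subsequent events and in inotify_rm_watch():
 *
 *	int wd = inotify_add_watch(fd, "/tmp",
 *				   IN_CREATE | IN_DELETE | IN_ONLYDIR);
 *	if (wd < 0)
 *		perror("inotify_add_watch");
 */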

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
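
/*
 * Illustrative userspace usage: removing a watch queues a final
 * IN_IGNORED event for that wd (see inotify_ignored_and_remove_idr()
 * above), so a consumer can drop its wd bookkeeping when it sees it:
 *
 *	inotify_rm_watch(fd, wd);
 *	... a later read() yields an event with mask IN_IGNORED
 *	    carrying the same wd ...
 */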

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);
833