// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
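
/*
 * Example (userspace view of the knobs above, with the defaults set in
 * inotify_user_setup() at the bottom of this file; the echoed value is
 * only illustrative):
 *
 *	$ cat /proc/sys/fs/inotify/max_queued_events
 *	16384
 *	# echo 65536 > /proc/sys/fs/inotify/max_user_watches
 */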

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Every mark should receive its own IN_IGNORED, care about events
	 * on children, and receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

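/*
 * Each event is reported to userspace as a struct inotify_event followed
 * by the name (if any), NUL-padded so that the name field length is a
 * multiple of sizeof(struct inotify_event).  For example, assuming the
 * usual 16-byte struct inotify_event, a 5-byte name is reported with
 * len == 16: the 5 name bytes followed by 11 '\0' bytes.
 */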
static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}

/*
 * Get one event from the notification queue if it exists and is small
 * enough to fit in "count".  Return an error pointer if the next event
 * is too large for the buffer.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/*
	 * held the notification_lock the whole time, so this is the
	 * same event we peeked above
	 */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * Round up the name length (plus one byte for the terminating '\0')
	 * so it is a multiple of event_size.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}

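/*
 * read() on the inotify fd: copy as many complete events as fit into the
 * user buffer.  If the very first event is already too large for the
 * buffer we fail with -EINVAL; once at least one event has been copied we
 * return the number of bytes written so far.  We only block when nothing
 * has been copied yet and O_NONBLOCK is not set.
 */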
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_new_group()->fsnotify_alloc_group() */
	fsnotify_destroy_group(group);

	return 0;
}

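/*
 * ioctl() on the inotify fd.  FIONREAD reports how many bytes the events
 * currently queued would occupy when read.  INOTIFY_IOC_SETNEXTWD, only
 * built with CONFIG_CHECKPOINT_RESTORE, sets the watch descriptor that
 * the next new watch will be allocated from (used by checkpoint/restore
 * to recreate watches with their original descriptors).
 */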
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

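/*
 * Watch descriptors are handed out from a per-group idr that maps a wd
 * back to its inotify_inode_mark.  Allocation is cyclic and starts at 1,
 * so wd 0 is never used and recently freed descriptors are not reused
 * right away.
 */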
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
							 int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr?  we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we can find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  Something went
	 * seriously wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover from bad ref counting */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

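/*
 * Update the mask of an existing watch on @inode.  With IN_MASK_ADD the
 * new bits are ORed into the current mask instead of replacing it; with
 * IN_MASK_CREATE finding an existing watch is an error (-EEXIST).
 * Returns the watch descriptor on success, or -ENOENT if this group has
 * no mark on the inode yet.
 */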
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);

	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

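/*
 * Create a brand new watch: allocate the mark, register it in the idr to
 * obtain a wd, charge it against the user's watch limit and finally
 * attach it to the inode.  Every failure path unwinds the idr entry
 * again.
 */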
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}


	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

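/*
 * Allocate a new inotify instance: an fsnotify group with a preallocated
 * overflow event, an empty wd idr and per-user instance accounting.
 */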
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
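/*
 * A minimal sketch of typical userspace usage of these syscalls (through
 * the libc wrappers; error handling omitted).  After the read(), buf
 * holds zero or more struct inotify_event records:
 *
 *	int fd = inotify_init1(IN_NONBLOCK);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */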
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * fsnotify_alloc_group() took a reference to the group; it is
	 * dropped when the file is released.
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);