xref: /openbmc/linux/fs/notify/inotify/inotify_user.c (revision 12eb4683)
1 /*
2  * fs/notify/inotify/inotify_user.c - inotify support for userspace
3  *
4  * Authors:
5  *	John McCutchan	<ttb@tentacle.dhs.org>
6  *	Robert Love	<rml@novell.com>
7  *
8  * Copyright (C) 2005 John McCutchan
9  * Copyright 2006 Hewlett-Packard Development Company, L.P.
10  *
11  * Copyright (C) 2009 Eric Paris <Red Hat Inc>
12  * inotify was largely rewritten to make use of the fsnotify infrastructure
13  *
14  * This program is free software; you can redistribute it and/or modify it
15  * under the terms of the GNU General Public License as published by the
16  * Free Software Foundation; either version 2, or (at your option) any
17  * later version.
18  *
19  * This program is distributed in the hope that it will be useful, but
20  * WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22  * General Public License for more details.
23  */
24 
25 #include <linux/file.h>
26 #include <linux/fs.h> /* struct inode */
27 #include <linux/fsnotify_backend.h>
28 #include <linux/idr.h>
29 #include <linux/init.h> /* module_init */
30 #include <linux/inotify.h>
31 #include <linux/kernel.h> /* roundup() */
32 #include <linux/namei.h> /* LOOKUP_FOLLOW */
33 #include <linux/sched.h> /* struct user */
34 #include <linux/slab.h> /* struct kmem_cache */
35 #include <linux/syscalls.h>
36 #include <linux/types.h>
37 #include <linux/anon_inodes.h>
38 #include <linux/uaccess.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 
42 #include "inotify.h"
43 #include "../fdinfo.h"
44 
45 #include <asm/ioctls.h>
46 
47 /* these are configurable via /proc/sys/fs/inotify/ */
48 static int inotify_max_user_instances __read_mostly;
49 static int inotify_max_queued_events __read_mostly;
50 static int inotify_max_user_watches __read_mostly;
51 
52 static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
53 struct kmem_cache *event_priv_cachep __read_mostly;
54 
55 #ifdef CONFIG_SYSCTL
56 
57 #include <linux/sysctl.h>
58 
59 static int zero;
60 
61 ctl_table inotify_table[] = {
62 	{
63 		.procname	= "max_user_instances",
64 		.data		= &inotify_max_user_instances,
65 		.maxlen		= sizeof(int),
66 		.mode		= 0644,
67 		.proc_handler	= proc_dointvec_minmax,
68 		.extra1		= &zero,
69 	},
70 	{
71 		.procname	= "max_user_watches",
72 		.data		= &inotify_max_user_watches,
73 		.maxlen		= sizeof(int),
74 		.mode		= 0644,
75 		.proc_handler	= proc_dointvec_minmax,
76 		.extra1		= &zero,
77 	},
78 	{
79 		.procname	= "max_queued_events",
80 		.data		= &inotify_max_queued_events,
81 		.maxlen		= sizeof(int),
82 		.mode		= 0644,
83 		.proc_handler	= proc_dointvec_minmax,
84 		.extra1		= &zero
85 	},
86 	{ }
87 };
88 #endif /* CONFIG_SYSCTL */
89 
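/*
 * All three limits are exported to userspace as
 * /proc/sys/fs/inotify/{max_user_instances,max_user_watches,max_queued_events}.
 *
 * Minimal userspace sketch (illustration only, needs <stdio.h>;
 * read_inotify_limit() is a made-up helper name):
 *
 *	static long read_inotify_limit(const char *name)
 *	{
 *		char path[128];
 *		long val = -1;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/sys/fs/inotify/%s", name);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%ld", &val) != 1)
 *			val = -1;
 *		fclose(f);
 *		return val;
 *	}
 *
 * read_inotify_limit("max_user_watches") returns 8192 until an
 * administrator raises it (see inotify_user_setup() at the end of this file).
 */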
90 static inline __u32 inotify_arg_to_mask(u32 arg)
91 {
92 	__u32 mask;
93 
94 	/*
95 	 * every watch always accepts its own IN_IGNORED, cares about events
96 	 * on its children, and receives events when the inode is unmounted
97 	 */
98 	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
99 
100 	/* keep only the caller's event bits and the stored flags (IN_ONESHOT, IN_EXCL_UNLINK) */
101 	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
102 
103 	return mask;
104 }
105 
106 static inline u32 inotify_mask_to_arg(__u32 mask)
107 {
108 	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
109 		       IN_Q_OVERFLOW);
110 }
111 
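/*
 * Worked example of the two conversions above (values for illustration only):
 *
 *	arg = IN_CREATE | IN_DELETE | IN_ONLYDIR	(from userspace)
 *	inotify_arg_to_mask(arg)
 *		== FS_CREATE | FS_DELETE
 *		   | FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT
 *	(IN_ONLYDIR only steers the path lookup and is never stored)
 *
 *	A mkdir in the watched directory queues an event whose mask is
 *	FS_CREATE | FS_ISDIR; inotify_mask_to_arg() turns that back into
 *	IN_CREATE | IN_ISDIR before it is copied to the reader.
 */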
112 /* inotify userspace file descriptor functions */
113 static unsigned int inotify_poll(struct file *file, poll_table *wait)
114 {
115 	struct fsnotify_group *group = file->private_data;
116 	int ret = 0;
117 
118 	poll_wait(file, &group->notification_waitq, wait);
119 	mutex_lock(&group->notification_mutex);
120 	if (!fsnotify_notify_queue_is_empty(group))
121 		ret = POLLIN | POLLRDNORM;
122 	mutex_unlock(&group->notification_mutex);
123 
124 	return ret;
125 }
126 
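/*
 * A minimal userspace sketch of waiting on an inotify fd (illustration
 * only; needs <poll.h>, inotify_fd comes from inotify_init1() and
 * drain_events() is a hypothetical helper):
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_events(inotify_fd);
 *
 * Once POLLIN is reported, a read() will return at least one whole event.
 */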
127 /*
128  * Get one queued event if one exists and is small enough to
129  * fit in "count".  Return an error pointer if the next event
130  * is too large to fit in the caller's buffer.
131  *
132  * Called with the group->notification_mutex held.
133  */
134 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
135 					    size_t count)
136 {
137 	size_t event_size = sizeof(struct inotify_event);
138 	struct fsnotify_event *event;
139 
140 	if (fsnotify_notify_queue_is_empty(group))
141 		return NULL;
142 
143 	event = fsnotify_peek_notify_event(group);
144 
145 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
146 
147 	if (event->name_len)
148 		event_size += roundup(event->name_len + 1, event_size);
149 
150 	if (event_size > count)
151 		return ERR_PTR(-EINVAL);
152 
153 	/* held the notification_mutex the whole time, so this is the
154 	 * same event we peeked above */
155 	fsnotify_remove_notify_event(group);
156 
157 	return event;
158 }
159 
160 /*
161  * Copy an event to user space, returning how much we copied.
162  *
163  * We already checked that the event size is smaller than the
164  * buffer we had in "get_one_event()" above.
165  */
166 static ssize_t copy_event_to_user(struct fsnotify_group *group,
167 				  struct fsnotify_event *event,
168 				  char __user *buf)
169 {
170 	struct inotify_event inotify_event;
171 	struct fsnotify_event_private_data *fsn_priv;
172 	struct inotify_event_private_data *priv;
173 	size_t event_size = sizeof(struct inotify_event);
174 	size_t name_len = 0;
175 
176 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
177 
178 	/* we get the inotify watch descriptor from the event private data */
179 	spin_lock(&event->lock);
180 	fsn_priv = fsnotify_remove_priv_from_event(group, event);
181 	spin_unlock(&event->lock);
182 
183 	if (!fsn_priv)
184 		inotify_event.wd = -1;
185 	else {
186 		priv = container_of(fsn_priv, struct inotify_event_private_data,
187 				    fsnotify_event_priv_data);
188 		inotify_event.wd = priv->wd;
189 		inotify_free_event_priv(fsn_priv);
190 	}
191 
192 	/*
193 	 * round up event->name_len, plus one byte for the terminating
194 	 * '\0', so that the total is a multiple of event_size.
195 	 */
196 	if (event->name_len)
197 		name_len = roundup(event->name_len + 1, event_size);
198 	inotify_event.len = name_len;
199 
200 	inotify_event.mask = inotify_mask_to_arg(event->mask);
201 	inotify_event.cookie = event->sync_cookie;
202 
203 	/* send the main event */
204 	if (copy_to_user(buf, &inotify_event, event_size))
205 		return -EFAULT;
206 
207 	buf += event_size;
208 
209 	/*
210 	 * fsnotify only stores the pathname, so here we have to send the pathname
211 	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
212 	 * with zeros.  clear_user() below supplies the zero padding.
213 	 */
214 	if (name_len) {
215 		unsigned int len_to_zero = name_len - event->name_len;
216 		/* copy the path name */
217 		if (copy_to_user(buf, event->file_name, event->name_len))
218 			return -EFAULT;
219 		buf += event->name_len;
220 
221 		/* fill userspace with 0's */
222 		if (clear_user(buf, len_to_zero))
223 			return -EFAULT;
224 		buf += len_to_zero;
225 		event_size += name_len;
226 	}
227 
228 	return event_size;
229 }
230 
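/*
 * What the reader sees: each record is a struct inotify_event header
 * followed by ev->len bytes of NUL-padded name (len is 0 when no name
 * was attached).  A userspace sketch of walking one read() buffer
 * (illustration only; needs <sys/inotify.h>, handle() is hypothetical):
 *
 *	char *p = buf;
 *
 *	while (p < buf + bytes_read) {
 *		const struct inotify_event *ev = (const void *)p;
 *
 *		handle(ev->wd, ev->mask, ev->cookie,
 *		       ev->len ? ev->name : NULL);
 *		p += sizeof(struct inotify_event) + ev->len;
 *	}
 */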
231 static ssize_t inotify_read(struct file *file, char __user *buf,
232 			    size_t count, loff_t *pos)
233 {
234 	struct fsnotify_group *group;
235 	struct fsnotify_event *kevent;
236 	char __user *start;
237 	int ret;
238 	DEFINE_WAIT(wait);
239 
240 	start = buf;
241 	group = file->private_data;
242 
243 	while (1) {
244 		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
245 
246 		mutex_lock(&group->notification_mutex);
247 		kevent = get_one_event(group, count);
248 		mutex_unlock(&group->notification_mutex);
249 
250 		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
251 
252 		if (kevent) {
253 			ret = PTR_ERR(kevent);
254 			if (IS_ERR(kevent))
255 				break;
256 			ret = copy_event_to_user(group, kevent, buf);
257 			fsnotify_put_event(kevent);
258 			if (ret < 0)
259 				break;
260 			buf += ret;
261 			count -= ret;
262 			continue;
263 		}
264 
265 		ret = -EAGAIN;
266 		if (file->f_flags & O_NONBLOCK)
267 			break;
268 		ret = -ERESTARTSYS;
269 		if (signal_pending(current))
270 			break;
271 
272 		if (start != buf)
273 			break;
274 
275 		schedule();
276 	}
277 
278 	finish_wait(&group->notification_waitq, &wait);
279 	if (start != buf && ret != -EFAULT)
280 		ret = buf - start;
281 	return ret;
282 }
283 
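/*
 * Reading behaviour in short: whole events only, -EINVAL if even the
 * first queued event does not fit in "count", -EAGAIN on an empty queue
 * with O_NONBLOCK, otherwise block until something arrives.  The usual
 * userspace buffer looks like this (illustration only; needs
 * <sys/inotify.h>, <unistd.h> and <errno.h>):
 *
 *	char buf[4096]
 *		__attribute__((aligned(__alignof__(struct inotify_event))));
 *	ssize_t len;
 *
 *	do {
 *		len = read(inotify_fd, buf, sizeof(buf));
 *	} while (len < 0 && errno == EINTR);
 */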
284 static int inotify_release(struct inode *ignored, struct file *file)
285 {
286 	struct fsnotify_group *group = file->private_data;
287 
288 	pr_debug("%s: group=%p\n", __func__, group);
289 
290 	/* free this group, matching the get in inotify_init->inotify_new_group */
291 	fsnotify_destroy_group(group);
292 
293 	return 0;
294 }
295 
296 static long inotify_ioctl(struct file *file, unsigned int cmd,
297 			  unsigned long arg)
298 {
299 	struct fsnotify_group *group;
300 	struct fsnotify_event_holder *holder;
301 	struct fsnotify_event *event;
302 	void __user *p;
303 	int ret = -ENOTTY;
304 	size_t send_len = 0;
305 
306 	group = file->private_data;
307 	p = (void __user *) arg;
308 
309 	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
310 
311 	switch (cmd) {
312 	case FIONREAD:
313 		mutex_lock(&group->notification_mutex);
314 		list_for_each_entry(holder, &group->notification_list, event_list) {
315 			event = holder->event;
316 			send_len += sizeof(struct inotify_event);
317 			if (event->name_len)
318 				send_len += roundup(event->name_len + 1,
319 						sizeof(struct inotify_event));
320 		}
321 		mutex_unlock(&group->notification_mutex);
322 		ret = put_user(send_len, (int __user *) p);
323 		break;
324 	}
325 
326 	return ret;
327 }
328 
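/*
 * FIONREAD from userspace (illustration only; needs <sys/ioctl.h>): the
 * returned byte count is exactly what a large enough read() would copy.
 *
 *	int pending = 0;
 *
 *	if (ioctl(inotify_fd, FIONREAD, &pending) == 0 && pending > 0)
 *		read_exactly(inotify_fd, pending);
 *
 * read_exactly() is a hypothetical helper that allocates "pending" bytes
 * and drains the queue in one read().
 */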
329 static const struct file_operations inotify_fops = {
330 	.show_fdinfo	= inotify_show_fdinfo,
331 	.poll		= inotify_poll,
332 	.read		= inotify_read,
333 	.fasync		= fsnotify_fasync,
334 	.release	= inotify_release,
335 	.unlocked_ioctl	= inotify_ioctl,
336 	.compat_ioctl	= inotify_ioctl,
337 	.llseek		= noop_llseek,
338 };
339 
340 
341 /*
342  * inotify_find_inode - resolve a user-given path to a specific inode
343  */
344 static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
345 {
346 	int error;
347 
348 	error = user_path_at(AT_FDCWD, dirname, flags, path);
349 	if (error)
350 		return error;
351 	/* you can only watch an inode if you have read permissions on it */
352 	error = inode_permission(path->dentry->d_inode, MAY_READ);
353 	if (error)
354 		path_put(path);
355 	return error;
356 }
357 
358 static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
359 			      struct inotify_inode_mark *i_mark)
360 {
361 	int ret;
362 
363 	idr_preload(GFP_KERNEL);
364 	spin_lock(idr_lock);
365 
366 	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
367 	if (ret >= 0) {
368 		/* we added the mark to the idr, take a reference */
369 		i_mark->wd = ret;
370 		fsnotify_get_mark(&i_mark->fsn_mark);
371 	}
372 
373 	spin_unlock(idr_lock);
374 	idr_preload_end();
375 	return ret < 0 ? ret : 0;
376 }
377 
378 static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
379 								int wd)
380 {
381 	struct idr *idr = &group->inotify_data.idr;
382 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
383 	struct inotify_inode_mark *i_mark;
384 
385 	assert_spin_locked(idr_lock);
386 
387 	i_mark = idr_find(idr, wd);
388 	if (i_mark) {
389 		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
390 
391 		fsnotify_get_mark(fsn_mark);
392 		/* One ref for being in the idr, one ref we just took */
393 		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
394 	}
395 
396 	return i_mark;
397 }
398 
399 static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
400 							 int wd)
401 {
402 	struct inotify_inode_mark *i_mark;
403 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
404 
405 	spin_lock(idr_lock);
406 	i_mark = inotify_idr_find_locked(group, wd);
407 	spin_unlock(idr_lock);
408 
409 	return i_mark;
410 }
411 
412 static void do_inotify_remove_from_idr(struct fsnotify_group *group,
413 				       struct inotify_inode_mark *i_mark)
414 {
415 	struct idr *idr = &group->inotify_data.idr;
416 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
417 	int wd = i_mark->wd;
418 
419 	assert_spin_locked(idr_lock);
420 
421 	idr_remove(idr, wd);
422 
423 	/* removed from the idr, drop that ref */
424 	fsnotify_put_mark(&i_mark->fsn_mark);
425 }
426 
427 /*
428  * Remove the mark from the idr (if present) and drop the reference
429  * on the mark because it was in the idr.
430  */
431 static void inotify_remove_from_idr(struct fsnotify_group *group,
432 				    struct inotify_inode_mark *i_mark)
433 {
434 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
435 	struct inotify_inode_mark *found_i_mark = NULL;
436 	int wd;
437 
438 	spin_lock(idr_lock);
439 	wd = i_mark->wd;
440 
441 	/*
442 	 * does this i_mark think it is in the idr?  we shouldn't get called
443 	 * if it wasn't....
444 	 */
445 	if (wd == -1) {
446 		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
447 			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
448 			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
449 		goto out;
450 	}
451 
452 	/* Let's look in the idr to see if we find it */
453 	found_i_mark = inotify_idr_find_locked(group, wd);
454 	if (unlikely(!found_i_mark)) {
455 		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
456 			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
457 			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
458 		goto out;
459 	}
460 
461 	/*
462 	 * We found a mark in the idr at the right wd, but it's
463 	 * not the mark we were told to remove.  eparis seriously
464 	 * fucked up somewhere.
465 	 */
466 	if (unlikely(found_i_mark != i_mark)) {
467 		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
468 			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
469 			"found_i_mark->group=%p found_i_mark->inode=%p\n",
470 			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
471 			i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
472 			found_i_mark->fsn_mark.group,
473 			found_i_mark->fsn_mark.i.inode);
474 		goto out;
475 	}
476 
477 	/*
478 	 * One ref for being in the idr
479 	 * one ref held by the caller trying to kill us
480 	 * one ref grabbed by inotify_idr_find
481 	 */
482 	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
483 		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
484 			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
485 			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
486 		/* we can't really recover with bad ref counting... */
487 		BUG();
488 	}
489 
490 	do_inotify_remove_from_idr(group, i_mark);
491 out:
492 	/* match the ref taken by inotify_idr_find_locked() */
493 	if (found_i_mark)
494 		fsnotify_put_mark(&found_i_mark->fsn_mark);
495 	i_mark->wd = -1;
496 	spin_unlock(idr_lock);
497 }
498 
499 /*
500  * Send IN_IGNORED for this wd, remove this wd from the idr.
501  */
502 void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
503 				    struct fsnotify_group *group)
504 {
505 	struct inotify_inode_mark *i_mark;
506 	struct fsnotify_event *ignored_event, *notify_event;
507 	struct inotify_event_private_data *event_priv;
508 	struct fsnotify_event_private_data *fsn_event_priv;
509 	int ret;
510 
511 	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
512 
513 	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
514 					      FSNOTIFY_EVENT_NONE, NULL, 0,
515 					      GFP_NOFS);
516 	if (!ignored_event)
517 		goto skip_send_ignore;
518 
519 	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
520 	if (unlikely(!event_priv))
521 		goto skip_send_ignore;
522 
523 	fsn_event_priv = &event_priv->fsnotify_event_priv_data;
524 
525 	fsnotify_get_group(group);
526 	fsn_event_priv->group = group;
527 	event_priv->wd = i_mark->wd;
528 
529 	notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
530 	if (notify_event) {
531 		if (IS_ERR(notify_event))
532 			ret = PTR_ERR(notify_event);
533 		else
534 			fsnotify_put_event(notify_event);
535 		inotify_free_event_priv(fsn_event_priv);
536 	}
537 
538 skip_send_ignore:
539 	/* matches the reference taken when the event was created */
540 	if (ignored_event)
541 		fsnotify_put_event(ignored_event);
542 
543 	/* remove this mark from the idr */
544 	inotify_remove_from_idr(group, i_mark);
545 
546 	atomic_dec(&group->inotify_data.user->inotify_watches);
547 }
548 
549 /* ding dong the mark is dead */
550 static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
551 {
552 	struct inotify_inode_mark *i_mark;
553 
554 	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
555 
556 	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
557 }
558 
559 static int inotify_update_existing_watch(struct fsnotify_group *group,
560 					 struct inode *inode,
561 					 u32 arg)
562 {
563 	struct fsnotify_mark *fsn_mark;
564 	struct inotify_inode_mark *i_mark;
565 	__u32 old_mask, new_mask;
566 	__u32 mask;
567 	int add = (arg & IN_MASK_ADD);
568 	int ret;
569 
570 	mask = inotify_arg_to_mask(arg);
571 
572 	fsn_mark = fsnotify_find_inode_mark(group, inode);
573 	if (!fsn_mark)
574 		return -ENOENT;
575 
576 	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
577 
578 	spin_lock(&fsn_mark->lock);
579 
580 	old_mask = fsn_mark->mask;
581 	if (add)
582 		fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
583 	else
584 		fsnotify_set_mark_mask_locked(fsn_mark, mask);
585 	new_mask = fsn_mark->mask;
586 
587 	spin_unlock(&fsn_mark->lock);
588 
589 	if (old_mask != new_mask) {
590 		/* more bits in old than in new? */
591 		int dropped = (old_mask & ~new_mask);
592 		/* more bits in this fsn_mark than the inode's mask? */
593 		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
594 
595 		/* update the inode with this new fsn_mark */
596 		if (dropped || do_inode)
597 			fsnotify_recalc_inode_mask(inode);
598 
599 	}
600 
601 	/* return the wd */
602 	ret = i_mark->wd;
603 
604 	/* match the get from fsnotify_find_mark() */
605 	fsnotify_put_mark(fsn_mark);
606 
607 	return ret;
608 }
609 
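/*
 * The IN_MASK_ADD behaviour above, seen from userspace (illustration
 * only; fd is an inotify instance, "/tmp/dir" an example path):
 *
 *	int wd1 = inotify_add_watch(fd, "/tmp/dir", IN_CREATE);
 *	int wd2 = inotify_add_watch(fd, "/tmp/dir",
 *				    IN_DELETE | IN_MASK_ADD);
 *
 * wd1 == wd2, and the watch now reports both IN_CREATE and IN_DELETE;
 * without IN_MASK_ADD the second call would have replaced the mask and
 * left only IN_DELETE.
 */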
610 static int inotify_new_watch(struct fsnotify_group *group,
611 			     struct inode *inode,
612 			     u32 arg)
613 {
614 	struct inotify_inode_mark *tmp_i_mark;
615 	__u32 mask;
616 	int ret;
617 	struct idr *idr = &group->inotify_data.idr;
618 	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
619 
620 	mask = inotify_arg_to_mask(arg);
621 
622 	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
623 	if (unlikely(!tmp_i_mark))
624 		return -ENOMEM;
625 
626 	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
627 	tmp_i_mark->fsn_mark.mask = mask;
628 	tmp_i_mark->wd = -1;
629 
630 	ret = -ENOSPC;
631 	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
632 		goto out_err;
633 
634 	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
635 	if (ret)
636 		goto out_err;
637 
638 	/* we are in the idr, now attach the mark to the inode */
639 	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode,
640 				       NULL, 0);
641 	if (ret) {
642 		/* we failed to get on the inode, get off the idr */
643 		inotify_remove_from_idr(group, tmp_i_mark);
644 		goto out_err;
645 	}
646 
647 	/* increment the number of watches the user has */
648 	atomic_inc(&group->inotify_data.user->inotify_watches);
649 
650 	/* return the watch descriptor for this new mark */
651 	ret = tmp_i_mark->wd;
652 
653 out_err:
654 	/* match the ref from fsnotify_init_mark() */
655 	fsnotify_put_mark(&tmp_i_mark->fsn_mark);
656 
657 	return ret;
658 }
659 
660 static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
661 {
662 	int ret = 0;
663 
664 	mutex_lock(&group->mark_mutex);
665 	/* try to update an existing watch with the new arg */
666 	ret = inotify_update_existing_watch(group, inode, arg);
667 	/* no mark present, try to add a new one */
668 	if (ret == -ENOENT)
669 		ret = inotify_new_watch(group, inode, arg);
670 	mutex_unlock(&group->mark_mutex);
671 
672 	return ret;
673 }
674 
675 static struct fsnotify_group *inotify_new_group(unsigned int max_events)
676 {
677 	struct fsnotify_group *group;
678 
679 	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
680 	if (IS_ERR(group))
681 		return group;
682 
683 	group->max_events = max_events;
684 
685 	spin_lock_init(&group->inotify_data.idr_lock);
686 	idr_init(&group->inotify_data.idr);
687 	group->inotify_data.user = get_current_user();
688 
689 	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
690 	    inotify_max_user_instances) {
691 		fsnotify_destroy_group(group);
692 		return ERR_PTR(-EMFILE);
693 	}
694 
695 	return group;
696 }
697 
698 
699 /* inotify syscalls */
700 SYSCALL_DEFINE1(inotify_init1, int, flags)
701 {
702 	struct fsnotify_group *group;
703 	int ret;
704 
705 	/* Check the IN_* constants for consistency.  */
706 	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
707 	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
708 
709 	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
710 		return -EINVAL;
711 
712 	/* inotify_new_group() took a reference to the group; we drop it when the file is released */
713 	group = inotify_new_group(inotify_max_queued_events);
714 	if (IS_ERR(group))
715 		return PTR_ERR(group);
716 
717 	ret = anon_inode_getfd("inotify", &inotify_fops, group,
718 				  O_RDONLY | flags);
719 	if (ret < 0)
720 		fsnotify_destroy_group(group);
721 
722 	return ret;
723 }
724 
725 SYSCALL_DEFINE0(inotify_init)
726 {
727 	return sys_inotify_init1(0);
728 }
729 
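/*
 * Userspace sketch of creating an instance (illustration only; needs
 * <sys/inotify.h>): inotify_init() is just inotify_init1(0), and the
 * IN_NONBLOCK/IN_CLOEXEC flags map 1:1 onto O_NONBLOCK/O_CLOEXEC as the
 * BUILD_BUG_ONs above check.
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *
 *	if (fd < 0)
 *		perror("inotify_init1");
 *
 * Expect EINVAL for unknown flags and EMFILE once this user already has
 * /proc/sys/fs/inotify/max_user_instances instances open.
 */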
730 SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
731 		u32, mask)
732 {
733 	struct fsnotify_group *group;
734 	struct inode *inode;
735 	struct path path;
736 	struct fd f;
737 	int ret;
738 	unsigned flags = 0;
739 
740 	/* reject a mask that contains none of the valid inotify event bits */
741 	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
742 		return -EINVAL;
743 
744 	f = fdget(fd);
745 	if (unlikely(!f.file))
746 		return -EBADF;
747 
748 	/* verify that this is indeed an inotify instance */
749 	if (unlikely(f.file->f_op != &inotify_fops)) {
750 		ret = -EINVAL;
751 		goto fput_and_out;
752 	}
753 
754 	if (!(mask & IN_DONT_FOLLOW))
755 		flags |= LOOKUP_FOLLOW;
756 	if (mask & IN_ONLYDIR)
757 		flags |= LOOKUP_DIRECTORY;
758 
759 	ret = inotify_find_inode(pathname, &path, flags);
760 	if (ret)
761 		goto fput_and_out;
762 
763 	/* inode held in place by reference to path; group by fdget on fd */
764 	inode = path.dentry->d_inode;
765 	group = f.file->private_data;
766 
767 	/* create/update an inode mark */
768 	ret = inotify_update_watch(group, inode, mask);
769 	path_put(&path);
770 fput_and_out:
771 	fdput(f);
772 	return ret;
773 }
774 
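/*
 * Userspace sketch of adding a watch (illustration only): IN_ONLYDIR
 * maps to LOOKUP_DIRECTORY, so the call fails with ENOTDIR for a
 * non-directory target, and IN_DONT_FOLLOW suppresses LOOKUP_FOLLOW.
 *
 *	int wd = inotify_add_watch(fd, "/etc",
 *				   IN_CREATE | IN_DELETE | IN_MOVED_TO |
 *				   IN_ONLYDIR);
 *
 *	if (wd < 0)
 *		perror("inotify_add_watch");
 *
 * Other errors to expect here: EACCES when the caller lacks read
 * permission on the inode, ENOSPC once max_user_watches is reached.
 */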
775 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
776 {
777 	struct fsnotify_group *group;
778 	struct inotify_inode_mark *i_mark;
779 	struct fd f;
780 	int ret = 0;
781 
782 	f = fdget(fd);
783 	if (unlikely(!f.file))
784 		return -EBADF;
785 
786 	/* verify that this is indeed an inotify instance */
787 	ret = -EINVAL;
788 	if (unlikely(f.file->f_op != &inotify_fops))
789 		goto out;
790 
791 	group = f.file->private_data;
792 
793 	ret = -EINVAL;
794 	i_mark = inotify_idr_find(group, wd);
795 	if (unlikely(!i_mark))
796 		goto out;
797 
798 	ret = 0;
799 
800 	fsnotify_destroy_mark(&i_mark->fsn_mark, group);
801 
802 	/* match ref taken by inotify_idr_find */
803 	fsnotify_put_mark(&i_mark->fsn_mark);
804 
805 out:
806 	fdput(f);
807 	return ret;
808 }
809 
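/*
 * Userspace sketch of removing a watch (illustration only): tearing the
 * mark down makes inotify_ignored_and_remove_idr() queue one final
 * IN_IGNORED event for that wd.
 *
 *	if (inotify_rm_watch(fd, wd) < 0)
 *		perror("inotify_rm_watch");
 *
 * On success a subsequent read() on fd returns an event with
 * mask == IN_IGNORED and this wd; after that the wd is invalid.
 */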
810 /*
811  * inotify_user_setup - Our initialization function.  Note that we cannot return
812  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
813  * must result in panic().
814  */
815 static int __init inotify_user_setup(void)
816 {
817 	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
818 	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
819 	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
820 	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
821 	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
822 	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
823 	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
824 	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
825 	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
826 	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
827 	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
828 	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
829 	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
830 	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
831 	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
832 	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
833 	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
834 	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
835 
836 	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
837 
838 	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
839 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
840 
841 	inotify_max_queued_events = 16384;
842 	inotify_max_user_instances = 128;
843 	inotify_max_user_watches = 8192;
844 
845 	return 0;
846 }
847 module_init(inotify_user_setup);
848